| source | python |
|---|---|
camera_pi.py
|
import time
import io
import threading
import picamera


class Camera(object):
    thread = None  # background thread that reads frames from camera
    frame = None  # current frame is stored here by background thread
    last_access = 0  # time of last client access to the camera

    def initialize(self):
        if Camera.thread is None:
            # start background frame thread
            Camera.thread = threading.Thread(target=self._thread)
            Camera.thread.start()
            # wait until frames start to be available
            while self.frame is None:
                time.sleep(0)

    def get_frame(self):
        Camera.last_access = time.time()
        self.initialize()
        return self.frame

    @classmethod
    def _thread(cls):
        with picamera.PiCamera() as camera:
            # camera setup
            camera.resolution = (316, 316)
            camera.hflip = True
            camera.vflip = True
            # let camera warm up
            camera.start_preview()
            time.sleep(2)
            stream = io.BytesIO()
            for foo in camera.capture_continuous(stream, 'jpeg',
                                                 use_video_port=True):
                # store frame
                stream.seek(0)
                cls.frame = stream.read()
                # reset stream for next frame
                stream.seek(0)
                stream.truncate()
                # if no client has asked for a frame in the last
                # 10 seconds, stop the thread
                if time.time() - cls.last_access > 10:
                    break
        cls.thread = None
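# Note: get_frame() is meant to be polled repeatedly by a streaming handler; a
# typical (hypothetical, not part of this file) Flask/MJPEG consumer would be:
#
#     def gen(camera):
#         while True:
#             frame = camera.get_frame()
#             yield (b'--frame\r\n'
#                    b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')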
|
app_wu1109.py
|
from datetime import datetime
from flask import Flask, request, render_template, session, redirect, Response, flash
from datetime import timedelta
import cv2
# import argparse
from utils import *
import mediapipe as mp
from body_part_angle import BodyPartAngle
from exercise import TypeOfExercise
from sounds.sound import fitness_sound
from interface.interface import TypeOfControl
from game.game import *
import pygame
## Added by Zihao: virtual try-on
import os
import subprocess
import threading
import math
import time
import random
import numpy as np
from virtualtryon.tryon import *
## Added by Zihao: virtual try-on (end)
from dance_feed import game3_frames
# from bgadd import *
app=Flask(__name__)
app.config['SECRET_KEY'] = '12345' # key used to sign the session cookie
#ap = argparse.ArgumentParser()
#ap.add_argument("-t",
# "--exercise_type",
# type=str,
# help='Type of activity to do',
# required=True)
#ap.add_argument("-vs",
# "--video_source",
# type=str,
# help='Type of activity to do',
# required=False)
#args = vars(ap.parse_args())
## force the home page to use "control" mode
args = {}
args['video_source'] = None
args["exercise_type"] = 'control'
args['type'] = 'fitness'
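# Note: this args dict stands in for the commented-out argparse CLI above, so the
# app always starts in 'control' mode and uses the default webcam as its source.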
## drawing body
mp_drawing = mp.solutions.drawing_utils
mp_pose = mp.solutions.pose
## hidden video feed used by the control (home) page
def control_frames():
## setting the video source
if args["video_source"] is not None:
cap = cv2.VideoCapture(args["video_source"])
else:
cap = cv2.VideoCapture(0) # webcam
w = 640
h = 480
cap.set(3, w) # width
cap.set(4, h) # height
with mp_pose.Pose(min_detection_confidence=0.8,
min_tracking_confidence=0.8) as pose:
counter = 0 # movement of exercise
status = True # state of move
hint = "Ready!"
while cap.isOpened():
ret, frame = cap.read()
# result_screen = np.zeros((250, 400, 3), np.uint8)
frame = cv2.flip(frame,1)
frame = cv2.resize(frame, (w, h), interpolation=cv2.INTER_AREA)
## recolor frame to RGB
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame.flags.writeable = False
## make detection
results = pose.process(frame)
## recolor back to BGR
frame.flags.writeable = True
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
try:
landmarks = results.pose_landmarks.landmark
counter, status, hint = TypeOfControl(landmarks).calculate_exercise(
args["exercise_type"], counter, status, hint)
except:
pass
## render detections (for landmarks)
mp_drawing.draw_landmarks(
frame,
results.pose_landmarks,
mp_pose.POSE_CONNECTIONS,
mp_drawing.DrawingSpec(color=(255, 255, 255),
thickness=2,
circle_radius=2),
mp_drawing.DrawingSpec(color=(174, 139, 45),
thickness=2,
circle_radius=2),
)
try:
angle = BodyPartAngle(landmarks)
cx = int(w *landmarks[mp.solutions.pose.PoseLandmark['LEFT_ELBOW'].value].x)
cy = int(h *landmarks[mp.solutions.pose.PoseLandmark['LEFT_ELBOW'].value].y)
cv2.putText(frame, str(round(angle.angle_of_the_left_arm())), (cx-20, cy-20),
cv2.FONT_HERSHEY_PLAIN, 2, (150, 150, 235), 2)
cx = int(w *landmarks[mp.solutions.pose.PoseLandmark['RIGHT_ELBOW'].value].x)
cy = int(h *landmarks[mp.solutions.pose.PoseLandmark['RIGHT_ELBOW'].value].y)
cv2.putText(frame, str(round(angle.angle_of_the_right_arm())), (cx-20, cy-20),
cv2.FONT_HERSHEY_PLAIN, 2, (150, 150, 235), 2)
cx = int(w *landmarks[mp.solutions.pose.PoseLandmark['LEFT_KNEE'].value].x)
cy = int(h *landmarks[mp.solutions.pose.PoseLandmark['LEFT_KNEE'].value].y)
cv2.putText(frame, str(round(angle.angle_of_the_left_leg())), (cx-20, cy-20),
cv2.FONT_HERSHEY_PLAIN, 2, (235, 150, 150), 2)
cx = int(w *landmarks[mp.solutions.pose.PoseLandmark['RIGHT_KNEE'].value].x)
cy = int(h *landmarks[mp.solutions.pose.PoseLandmark['RIGHT_KNEE'].value].y)
cv2.putText(frame, str(round(angle.angle_of_the_right_leg())), (cx-20, cy-20),
cv2.FONT_HERSHEY_PLAIN, 2, (235, 150, 150), 2)
cx = int(w *(landmarks[mp.solutions.pose.PoseLandmark['LEFT_SHOULDER'].value].x+landmarks[mp.solutions.pose.PoseLandmark['RIGHT_SHOULDER'].value].x)/2)
cy = int(h *(landmarks[mp.solutions.pose.PoseLandmark['LEFT_SHOULDER'].value].y+landmarks[mp.solutions.pose.PoseLandmark['RIGHT_SHOULDER'].value].y)/2)
cv2.putText(frame, str(round(angle.angle_of_the_neck())), (cx-20, cy-20),
cv2.FONT_HERSHEY_PLAIN, 2, (150, 235, 150), 2)
cx = int(w *(landmarks[mp.solutions.pose.PoseLandmark['LEFT_HIP'].value].x+landmarks[mp.solutions.pose.PoseLandmark['RIGHT_HIP'].value].x)/2)
cy = int(h *(landmarks[mp.solutions.pose.PoseLandmark['LEFT_HIP'].value].y+landmarks[mp.solutions.pose.PoseLandmark['RIGHT_HIP'].value].y)/2)
cv2.putText(frame, str(round(angle.angle_of_the_abdomen())), (cx-20, cy-20),
cv2.FONT_HERSHEY_PLAIN, 2, (150, 150, 150), 2)
except:
pass
#score_frame = score_table(args["exercise_type"], counter, status, hint)
#print(frame.shape,score_frame.shape)
#im_h_resize = cv2.hconcat([frame, score_frame])
ret, buffer = cv2.imencode('.jpg', frame)
frame = buffer.tobytes()
# frame = frame.tobytes()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
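# Note: every *_frames() generator in this file uses the same MJPEG pattern: each
# JPEG-encoded frame is yielded as one part of a multipart/x-mixed-replace HTTP
# response, delimited by the "--frame" boundary declared in the Flask routes
# below, so the browser keeps replacing the image with the newest frame.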
## fitness video feed
def fitness_frames(exercise_type):
if args["video_source"] is not None:
cap = cv2.VideoCapture(args["video_source"])
else:
cap = cv2.VideoCapture(0) # webcam
w = 960
h = 720
cap.set(3, w) # width
cap.set(4, h) # height
mp4=f"videos/{exercise_type}.mp4"
cap = cv2.VideoCapture(mp4)
counter = 0 # movement of exercise
status = True # state of move
hint = "Ready!"
switch=True
soundon = 0
flag = 0
# mp3=f"sounds/{exercise_type}.mp3"
# pygame.mixer.init()
# pygame.mixer.music.load(mp3)
# while cap.isOpened():
# #print(exercise_type)
# if soundon == 0 :
# pygame.mixer.music.play()
# soundon = 1
# try:
# ret, frame = cap.read()
# frame = cv2.resize(frame, (1600, 960), interpolation=cv2.INTER_AREA)
# ret, buffer = cv2.imencode('.jpg', frame)
# frame = buffer.tobytes()
# # frame = frame.tobytes()
# # write text info to a txt file
# # stream webcam frames to the web page
# cv2.waitKey(1) # changed from 25 to 1 for smoother playback
# with open('fitness.txt','w+') as f:
# f.write(f"{switch},{exercise_type},{counter},{status},{hint}"+'\n')
# yield (b'--frame\r\n'
# b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
# except:
# break
cap = cv2.VideoCapture(0)
## setup mediapipe
with mp_pose.Pose(min_detection_confidence=0.8,
min_tracking_confidence=0.8) as pose:
encourage=["images/super.png", "images/greatjob.png", "images/goodjob1.png", "images/goodjob.png",
"images/welldown.png", "images/awesome.png","images/nicework.png" ]
start_time = time.time()
while cap.isOpened():
ret, frame = cap.read()
frame = cv2.resize(frame, (w, h), interpolation=cv2.INTER_AREA)
## recolor frame to RGB
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame.flags.writeable = False
## make detection
results = pose.process(frame)
## recolor back to BGR
frame.flags.writeable = True
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
try:
#counter+=0.1
landmarks = results.pose_landmarks.landmark
counter, status, hint= TypeOfExercise(landmarks).calculate_exercise(
exercise_type, counter, status, hint)
except:
pass
## render detections (for landmarks)
mp_drawing.draw_landmarks(
frame,
results.pose_landmarks,
mp_pose.POSE_CONNECTIONS,
mp_drawing.DrawingSpec(color=(255, 255, 255),
thickness=2,
circle_radius=2),
mp_drawing.DrawingSpec(color=(174, 139, 45),
thickness=2,
circle_radius=2),
)
try:
angle = BodyPartAngle(landmarks)
cx = int(w *landmarks[mp.solutions.pose.PoseLandmark['LEFT_ELBOW'].value].x)
cy = int(h *landmarks[mp.solutions.pose.PoseLandmark['LEFT_ELBOW'].value].y)
cv2.putText(frame, str(round(angle.angle_of_the_left_arm())), (cx-20, cy-20),
cv2.FONT_HERSHEY_PLAIN, 2, (150, 150, 235), 2)
cx = int(w *landmarks[mp.solutions.pose.PoseLandmark['RIGHT_ELBOW'].value].x)
cy = int(h *landmarks[mp.solutions.pose.PoseLandmark['RIGHT_ELBOW'].value].y)
cv2.putText(frame, str(round(angle.angle_of_the_right_arm())), (cx-20, cy-20),
cv2.FONT_HERSHEY_PLAIN, 2, (150, 150, 235), 2)
cx = int(w *landmarks[mp.solutions.pose.PoseLandmark['LEFT_KNEE'].value].x)
cy = int(h *landmarks[mp.solutions.pose.PoseLandmark['LEFT_KNEE'].value].y)
cv2.putText(frame, str(round(angle.angle_of_the_left_leg())), (cx-20, cy-20),
cv2.FONT_HERSHEY_PLAIN, 2, (235, 150, 150), 2)
cx = int(w *landmarks[mp.solutions.pose.PoseLandmark['RIGHT_KNEE'].value].x)
cy = int(h *landmarks[mp.solutions.pose.PoseLandmark['RIGHT_KNEE'].value].y)
cv2.putText(frame, str(round(angle.angle_of_the_right_leg())), (cx-20, cy-20),
cv2.FONT_HERSHEY_PLAIN, 2, (235, 150, 150), 2)
cx = int(w *(landmarks[mp.solutions.pose.PoseLandmark['LEFT_SHOULDER'].value].x+landmarks[mp.solutions.pose.PoseLandmark['RIGHT_SHOULDER'].value].x)/2)
cy = int(h *(landmarks[mp.solutions.pose.PoseLandmark['LEFT_SHOULDER'].value].y+landmarks[mp.solutions.pose.PoseLandmark['RIGHT_SHOULDER'].value].y)/2)
cv2.putText(frame, str(round(angle.angle_of_the_neck())), (cx-20, cy-20),
cv2.FONT_HERSHEY_PLAIN, 2, (150, 235, 150), 2)
cx = int(w *(landmarks[mp.solutions.pose.PoseLandmark['LEFT_HIP'].value].x+landmarks[mp.solutions.pose.PoseLandmark['RIGHT_HIP'].value].x)/2)
cy = int(h *(landmarks[mp.solutions.pose.PoseLandmark['LEFT_HIP'].value].y+landmarks[mp.solutions.pose.PoseLandmark['RIGHT_HIP'].value].y)/2)
cv2.putText(frame, str(round(angle.angle_of_the_abdomen())), (cx-20, cy-20),
cv2.FONT_HERSHEY_PLAIN, 2, (150, 150, 150), 2)
except:
pass
if int(counter)%5==0 and flag == 0:
enc_img = cv2.imread(random.choice(encourage))
flag = 1
if 0<round(counter,1)%5.0<2:
enc_img = cv2.resize( enc_img, (250, 250),0,fx=1,fy=1,interpolation=cv2.INTER_AREA)
img_height, img_width, _ = enc_img.shape
enc_img_gray = cv2.cvtColor(enc_img, cv2.COLOR_BGR2GRAY)
_, enc_img_mask = cv2.threshold(enc_img_gray, 25, 255, cv2.THRESH_BINARY_INV)
x, y = int(300-img_width/2), int(400-img_height/2)
enc_img_area = frame[y: y+img_height, x: x+img_width]
enc_img_area_no_enc_img = cv2.bitwise_and(enc_img_area, enc_img_area, mask=enc_img_mask)
final_enc_img = cv2.add(enc_img_area_no_enc_img, enc_img)
frame[y: y+img_height, x: x+img_width] = final_enc_img
flag = 0
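# Overlay technique used above: THRESH_BINARY_INV turns the encouragement image's
# near-black background into the mask, bitwise_and keeps the camera frame only in
# that background region, and cv2.add then pastes the image's non-black pixels
# onto the cleared area of the frame.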
ret, buffer = cv2.imencode('.jpg', frame)
frame = buffer.tobytes()
# write text info to a txt file
if round(counter,1)>=20:
switch=False
#print(switch)
end_time = time.time()-start_time
with open('fitness.txt','w+') as f:
f.write(f"{switch},{exercise_type},{counter},{status},{hint},{end_time}"+'\n')
#print(end_time)
# stream webcam frames to the web page
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
## boxing game (game1) video feed
def games_frames(game_type='game1'):
if args["video_source"] is not None:
cap = cv2.VideoCapture(args["video_source"])
else:
cap = cv2.VideoCapture(0) # webcam
w = 1280
h = 960
cap.set(3, w) # width
cap.set(4, h) # height
# initialize sound effects
file = f"sounds/game1.mp3"
pygame.mixer.init()
pygame.mixer.music.load(file)
soundon = 0
game_status = 0 # 0 = start
# set up the initial game environment
start_time = time.time()
env_list = game_start(game_type)
counter = 0 # movement of exercise
## setup mediapipe
with mp_pose.Pose(min_detection_confidence=0.8,
min_tracking_confidence=0.8) as pose:
while cap.isOpened():
ret, frame = cap.read()
# result_screen = np.zeros((250, 400, 3), np.uint8)
frame = cv2.flip(frame,1)
frame = cv2.resize(frame, (w, h), interpolation=cv2.INTER_AREA)
## recolor frame to RGB
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame.flags.writeable = False
## make detection
results = pose.process(frame)
## recolor back to BGR
frame.flags.writeable = True
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
#================================================================
# parameters returned to the system once the game is running
env_coordinate = game_play(game_type,env_list,time.time()-start_time)
# draw those parameters onto the frame
frame = game_plot(game_type,frame,env_coordinate)
#================================================================
try:
if soundon==0 :
pygame.mixer.music.play()
soundon = 1
start_time = time.time()
landmarks = results.pose_landmarks.landmark
total_status = []
for i,env in enumerate(env_coordinate):
counter, env_list[i].status = TypeOfMove(landmarks).calculate_exercise(game_type, counter, env[0],[w,h,env[1],env[2],env[3],env[4]])
total_status.append(env_list[i].status)
except:
total_status = []
pass
# score_table(game_type, counter, [str(x)[0] for x in total_status],timer(start_time))
## render detections (for landmarks)
mp_drawing.draw_landmarks(
frame,
results.pose_landmarks,
mp_pose.POSE_CONNECTIONS,
mp_drawing.DrawingSpec(color=(255, 255, 255),
thickness=2,
circle_radius=2),
mp_drawing.DrawingSpec(color=(174, 139, 45),
thickness=2,
circle_radius=2),
)
## draw the virtual boxing gloves
try:
if game_type == 'game1':
angle = BodyPartAngle(landmarks)
x_LEFT_WRIST = int(w *landmarks[mp.solutions.pose.PoseLandmark['LEFT_WRIST'].value].x)
y_LEFT_WRIST = int(h *landmarks[mp.solutions.pose.PoseLandmark['LEFT_WRIST'].value].y)
# cv2.putText(frame, str(f'{x_LEFT_WRIST},{y_LEFT_WRIST}'), (x_LEFT_WRIST+40, y_LEFT_WRIST+40),cv2.FONT_HERSHEY_PLAIN, 2, (150, 150, 235), 2)
x_RIGHT_WRIST = int(w *landmarks[mp.solutions.pose.PoseLandmark['RIGHT_WRIST'].value].x)
y_RIGHT_WRIST = int(h *landmarks[mp.solutions.pose.PoseLandmark['RIGHT_WRIST'].value].y)
#cv2.putText(frame, str(f'{x_RIGHT_WRIST},{y_RIGHT_WRIST}'), (x_RIGHT_WRIST+40, y_RIGHT_WRIST+40),cv2.FONT_HERSHEY_PLAIN, 2, (212, 255, 127), 2)
x_LEFT_ELBOW = int(w *landmarks[mp.solutions.pose.PoseLandmark['LEFT_ELBOW'].value].x)
y_LEFT_ELBOW = int(h *landmarks[mp.solutions.pose.PoseLandmark['LEFT_ELBOW'].value].y)
#print(f'LEFT_ELBOW[{x_LEFT_ELBOW},{y_LEFT_ELBOW}]')
cv2.putText(frame, str(round(angle.angle_of_the_left_arm())), (x_LEFT_ELBOW-20, y_LEFT_ELBOW-20),
cv2.FONT_HERSHEY_PLAIN, 2, (212, 255, 127), 2)
cv2.putText(frame, str(round(angle.left_angle_of_the_elbow_horizon())), (x_LEFT_ELBOW+60, y_LEFT_ELBOW+60),
cv2.FONT_HERSHEY_PLAIN, 2, (54, 38, 227), 2)
x_RIGHT_ELBOW = int(w *landmarks[mp.solutions.pose.PoseLandmark['RIGHT_ELBOW'].value].x)
y_RIGHT_ELBOW = int(h *landmarks[mp.solutions.pose.PoseLandmark['RIGHT_ELBOW'].value].y)
cv2.putText(frame, str(round(angle.angle_of_the_right_arm())), (x_RIGHT_ELBOW-20, y_RIGHT_ELBOW-20),
cv2.FONT_HERSHEY_PLAIN, 2, (212, 255, 127), 2)
cv2.putText(frame, str(round(angle.right_angle_of_the_elbow_horizon())), (x_RIGHT_ELBOW+60, y_RIGHT_ELBOW+20),
cv2.FONT_HERSHEY_PLAIN, 2, (54, 38, 227), 2)
x_LEFT_INDEX = int(w *landmarks[mp.solutions.pose.PoseLandmark['LEFT_INDEX'].value].x)
y_LEFT_INDEX = int(h *landmarks[mp.solutions.pose.PoseLandmark['LEFT_INDEX'].value].y)
#print(f'LEFT_INDEX[{x_LEFT_INDEX},{y_LEFT_INDEX}]')
x_RIGHT_INDEX = int(w *landmarks[mp.solutions.pose.PoseLandmark['RIGHT_INDEX'].value].x)
y_RIGHT_INDEX = int(h *landmarks[mp.solutions.pose.PoseLandmark['RIGHT_INDEX'].value].y)
#print(f'RIGHT_INDEX[{x_RIGHT_INDEX},{y_RIGHT_INDEX}]')
x_RIGHT_KNEE = int(w *landmarks[mp.solutions.pose.PoseLandmark['RIGHT_KNEE'].value].x)
y_RIGHT_KNEE = int(h *landmarks[mp.solutions.pose.PoseLandmark['RIGHT_KNEE'].value].y)
x_RIGHT_HIP = int(w *landmarks[mp.solutions.pose.PoseLandmark['RIGHT_HIP'].value].x)
y_RIGHT_HIP = int(h *landmarks[mp.solutions.pose.PoseLandmark['RIGHT_HIP'].value].y)
distance_of_KNEE_HIP=int(abs(((x_RIGHT_KNEE-x_RIGHT_HIP)**2+(y_RIGHT_KNEE-y_RIGHT_HIP)**2)**0.5))
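# The glove overlay is sized relative to the player's body: its side length is
# twice the on-screen right knee-to-hip distance, so it scales as the player
# moves toward or away from the camera.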
# left hand
# load the glove image
if round(angle.left_angle_of_the_elbow_horizon()) <= 105 :
glove = cv2.imread("images/glove1.png")
glove = cv2.flip(glove,1)
img_height, img_width, _ = glove.shape
# reference length for the glove
glove_size= (distance_of_KNEE_HIP*2)
print(f'glove_size:{glove_size}')
# resize the image to a suitable size
glove = cv2.resize( glove, (glove_size, glove_size),0,fx=1,fy=1,interpolation=cv2.INTER_AREA)
# args: rotation center (image center), rotation angle (- clockwise / + counter-clockwise), scale factor
#print(f'y_LEFT_ELBOW {y_LEFT_ELBOW},y_LEFT_WRIST {y_LEFT_WRIST}')
if y_LEFT_ELBOW >= y_LEFT_WRIST:
M = cv2.getRotationMatrix2D((glove_size // 2, glove_size // 2), -round(angle.left_angle_of_the_elbow_horizon()-90), 1.0)
else:
M = cv2.getRotationMatrix2D((glove_size // 2, glove_size // 2), round(angle.left_angle_of_the_elbow_horizon()+90), 1.0)
# the third argument is the size of the output image
glove = cv2.warpAffine(glove, M, (glove_size, glove_size))
#return rotate_img
#print(glove.shape)
# composite the glove image onto the hand (mask out its black background, then add)
glove_gray = cv2.cvtColor(glove, cv2.COLOR_BGR2GRAY)
_, glove_mask = cv2.threshold(glove_gray, 25, 255, cv2.THRESH_BINARY_INV)
x, y = int(x_LEFT_INDEX-glove_size/2), int(y_LEFT_INDEX-glove_size/2) # (x, y) is the top-left corner of the pasted image
glove_area = frame[y: y+glove_size, x: x+glove_size]
glove_area_no_glove = cv2.bitwise_and(glove_area, glove_area, mask=glove_mask)
final_glove = cv2.add(glove_area_no_glove, glove)
frame[y: y+glove_size, x: x+glove_size] = final_glove
else:
glove = cv2.imread("images/glove2.png")
img_height, img_width, _ = glove.shape
# reference length for the glove
glove_size= (distance_of_KNEE_HIP*2)
print(f'glove_size:{glove_size}')
# resize the image to a suitable size
glove = cv2.resize( glove, (glove_size, glove_size),0,fx=1,fy=1,interpolation=cv2.INTER_AREA)
# args: rotation center (image center), rotation angle (- clockwise / + counter-clockwise), scale factor
#print(f'y_LEFT_ELBOW {y_LEFT_ELBOW},y_LEFT_WRIST {y_LEFT_WRIST}')
if y_LEFT_ELBOW >= y_LEFT_WRIST:
M = cv2.getRotationMatrix2D((glove_size // 2, glove_size // 2), -round(angle.left_angle_of_the_elbow_horizon()-90), 1.0)
else:
M = cv2.getRotationMatrix2D((glove_size // 2, glove_size // 2), round(angle.left_angle_of_the_elbow_horizon()+90), 1.0)
# the third argument is the size of the output image
glove = cv2.warpAffine(glove, M, (glove_size, glove_size))
#return rotate_img
#print(glove.shape)
# composite the glove image onto the hand (mask out its black background, then add)
glove_gray = cv2.cvtColor(glove, cv2.COLOR_BGR2GRAY)
_, glove_mask = cv2.threshold(glove_gray, 25, 255, cv2.THRESH_BINARY_INV)
x, y = int(x_LEFT_INDEX-glove_size/2), int(y_LEFT_INDEX-glove_size/2) # (x, y) is the top-left corner of the pasted image
glove_area = frame[y: y+glove_size, x: x+glove_size]
glove_area_no_glove = cv2.bitwise_and(glove_area, glove_area, mask=glove_mask)
final_glove = cv2.add(glove_area_no_glove, glove)
frame[y: y+glove_size, x: x+glove_size] = final_glove
if round(angle.right_angle_of_the_elbow_horizon()) <= 105 :
# right hand
glove = cv2.imread("images/glove1.png")
img_height, img_width, _ = glove.shape
glove_size= (distance_of_KNEE_HIP*2)
glove = cv2.resize( glove, (glove_size, glove_size),0,fx=1,fy=1,interpolation=cv2.INTER_AREA)
print(f'y_RIGHT_ELBOW {y_RIGHT_ELBOW},y_RIGHT_WRIST {y_RIGHT_WRIST}')
if y_RIGHT_ELBOW >= y_RIGHT_WRIST:
M = cv2.getRotationMatrix2D((glove_size // 2, glove_size // 2), round(angle.right_angle_of_the_elbow_horizon()-90), 1.0)
else:
M = cv2.getRotationMatrix2D((glove_size // 2, glove_size // 2), -round(angle.right_angle_of_the_elbow_horizon()+90), 1.0)
# M = cv2.getRotationMatrix2D((glove_size // 2, glove_size // 2), round(angle.right_angle_of_the_elbow_horizon()-90), 1.0)
glove = cv2.warpAffine(glove, M, (glove_size, glove_size))
glove_gray = cv2.cvtColor(glove, cv2.COLOR_BGR2GRAY)
_, glove_mask = cv2.threshold(glove_gray, 25, 255, cv2.THRESH_BINARY_INV)
x, y = int(x_RIGHT_INDEX-glove_size/2), int(y_RIGHT_INDEX-glove_size/2) # (x, y) is the top-left corner of the pasted image
glove_area = frame[y: y+glove_size, x: x+glove_size]
glove_area_no_glove = cv2.bitwise_and(glove_area, glove_area, mask=glove_mask)
final_glove = cv2.add(glove_area_no_glove, glove)
frame[y: y+glove_size, x: x+glove_size] = final_glove
else:
glove = cv2.imread("images/glove2.png")
glove = cv2.flip(glove,1)
img_height, img_width, _ = glove.shape
glove_size= (distance_of_KNEE_HIP*2)
glove = cv2.resize( glove, (glove_size, glove_size),0,fx=1,fy=1,interpolation=cv2.INTER_AREA)
print(f'y_RIGHT_ELBOW {y_RIGHT_ELBOW},y_RIGHT_WRIST {y_RIGHT_WRIST}')
if y_RIGHT_ELBOW >= y_RIGHT_WRIST:
M = cv2.getRotationMatrix2D((glove_size // 2, glove_size // 2), round(angle.right_angle_of_the_elbow_horizon()-90), 1.0)
else:
M = cv2.getRotationMatrix2D((glove_size // 2, glove_size // 2), -round(angle.right_angle_of_the_elbow_horizon()+90), 1.0)
glove = cv2.warpAffine(glove, M, (glove_size, glove_size))
glove_gray = cv2.cvtColor(glove, cv2.COLOR_BGR2GRAY)
_, glove_mask = cv2.threshold(glove_gray, 25, 255, cv2.THRESH_BINARY_INV)
x, y = int(x_RIGHT_INDEX-glove_size/2), int(y_RIGHT_INDEX-glove_size/2) # (x, y) is the top-left corner of the pasted image
glove_area = frame[y: y+glove_size, x: x+glove_size]
glove_area_no_glove = cv2.bitwise_and(glove_area, glove_area, mask=glove_mask)
final_glove = cv2.add(glove_area_no_glove, glove)
frame[y: y+glove_size, x: x+glove_size] = final_glove
except:
pass
if timer(start_time) == "00:00:55":
game_status = 1 #end
# score_frame = score_table(game_type, counter, env[0], [w,h,env[1],env[2],env[3],env[4]])
# score_frame = cv2.resize(score_frame, (320,720), interpolation=cv2.INTER_AREA)
# print(frame.shape,score_frame.shape)
# im_h_resize = cv2.hconcat([frame, score_frame])
ret, buffer = cv2.imencode('.jpg', frame)
frame = buffer.tobytes()
# frame = frame.tobytes()
# write text info to a txt file
with open('game.txt','w+') as f:
f.write(f"{game_status},{game_type},{counter},{timer(start_time)}"+'\n')
# emit the frame as binary JPEG data
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
## game2 video feed ("1-2-3 freeze", a red-light-green-light style game)
def game2_frames(game_type='game2'):
def timer1(start_time):
time_diff = time.time()-start_time
return time_diff
file = f"sounds/game2.mp3"
pygame.mixer.init()
pygame.mixer.music.load(file)
game_over = False
soundon = 0
start = 0
total_use_time = 0
game_status = 0 # game start
mp_drawing = mp.solutions.drawing_utils
mp_selfie_segmentation = mp.solutions.selfie_segmentation
mp_pose = mp.solutions.pose
# set up the initial game environment
start_time = time.time()
pose = mp_pose.Pose(min_detection_confidence=0.8,
min_tracking_confidence=0.8)
counter = 0
hard = 21
time_list = [8*g+x for g,x in enumerate(sorted(random.sample(range(4,70),20)))]
print(time_list)
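# time_list spaces 20 random cue times (sampled from 4-70 s, sorted) at least
# 8 seconds apart by adding 8*g to the g-th sample; a sound cue fires whenever
# the elapsed whole-second count matches one of these values.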
final_frame = np.ones((800,1320,3),dtype =np.uint8)*60
print(final_frame.shape)
cap = cv2.VideoCapture(0)
status = True
with mp_selfie_segmentation.SelfieSegmentation(
model_selection=1) as selfie_segmentation:
bg_image = None
while cap.isOpened():
#=== cheat code (for testing) ====
# if counter<hard-1:
# counter += 0.2
#===================
if soundon== 0 :
if int(timer1(start_time)) in time_list:
pygame.mixer.music.play()
soundon = 1
act_time = time.time()
print(counter)
if soundon == 1 and 4.7<(time.time()-act_time)<8:
L_IMAGE = cv2.imread('images/007.png')
L_IMAGE = cv2.resize(L_IMAGE,(640,780),interpolation=cv2.INTER_LINEAR)
elif soundon == 1 and (time.time()-act_time)>8:
soundon=0
else:
L_IMAGE = cv2.imread('images/006.png')
if game_over:
game_status = 1 #fail
soundon = 2
end_time = time.time()
total_use_time = timer(start_time,end_time)
break
img = cv2.imread('images/008.png')
h_center = int(img.shape[0]/2)
w_center = int(img.shape[1]/2)
dead_h = img.shape[0]
dead_w = img.shape[1]
ratio = min(1,(1+dead_time)/8)
print(ratio)
img = img[int(h_center-ratio*dead_h/2):int(h_center+ratio*dead_h/2),int(w_center-ratio*dead_w/2):int(w_center+ratio*dead_w/2)]
final_frame = cv2.resize(img,(1320,800),interpolation=cv2.INTER_LINEAR)
elif (hard-1-0.01)<counter<(hard-1+0.22):
game_status = 2 #win
soundon = 2
break
if start == 0:
cap = cv2.VideoCapture('images/victory.mp4')
start +=1
elif cap.get(cv2.CAP_PROP_POS_FRAMES) <227:
ret , final_frame = cap.read()
final_frame = cv2.resize(img,(1320,800),interpolation=cv2.INTER_LINEAR)
end_time = time.time()
else:
vic_time = time.time()-end_time
img = cv2.imread('images/victory.png')
total_use_time = timer(start_time,end_time)
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img,f"Total time : {total_use_time}",(320,600),font,2,(80,127,255),3,cv2.LINE_AA)
h_center = int(img.shape[0]/2)
w_center = int(img.shape[1]/2)
vic_h = img.shape[0]
vic_w = img.shape[1]
ratio = min(1,(1+vic_time)/8)
img = img[int(h_center-ratio*vic_h/2):int(h_center+ratio*vic_h/2),int(w_center-ratio*vic_w/2):int(w_center+ratio*vic_w/2)]
final_frame = cv2.resize(img,(1320,800),interpolation=cv2.INTER_LINEAR)
else:
BG_COLOR = cv2.imread("images/004.png")
RL_IMAGE = cv2.imread('images/005.png')
RL_x = int(0+(700/hard)*counter)
RL_y = int(0+(900/hard)*counter)
RL_Rangex = int(2560-1920*counter/hard)
RL_Rangey = int(1200-900*counter/hard)
RL_IMAGE = RL_IMAGE[RL_y:RL_y+RL_Rangey,RL_x:RL_x+RL_Rangex]
RL_IMAGE = cv2.resize(RL_IMAGE,(640,300), interpolation=cv2.INTER_AREA)
Orix = int(1533-(893/hard)*counter)
Oriy = int(1150-(670/hard)*counter)
BG_COLOR = cv2.resize(BG_COLOR, (Orix, Oriy), interpolation=cv2.INTER_AREA)
x = int(447-(447/hard)*counter)
y = int(335-(335/hard)*counter)
w = 640
h = 480
BG_COLOR = BG_COLOR[y:y+h, x:x+w]
# print(counter)
success, image = cap.read()
if not success:
print("Ignoring empty camera frame.")
# If loading a video, use 'break' instead of 'continue'.
continue
# Flip the image horizontally for a later selfie-view display, and convert
# the BGR image to RGB.
image = cv2.resize(image,(640,480))
image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)
# To improve performance, optionally mark the image as not writeable to
# pass by reference.
image.flags.writeable = False
results = selfie_segmentation.process(image)
image.flags.writeable = True
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
# Draw selfie segmentation on the background image.
# To improve segmentation around boundaries, consider applying a joint
# bilateral filter to "results.segmentation_mask" with "image".
condition = np.stack(
(results.segmentation_mask,) * 3, axis=-1) > 0.1
# The background can be customized.
# a) Load an image (with the same width and height of the input image) to
# be the background, e.g., bg_image = cv2.imread('/path/to/image/file')
# b) Blur the input image by applying image filtering, e.g.,
# bg_image = cv2.GaussianBlur(image,(55,55),0)
# if bg_image is None:
# bg_image = np.zeros(image.shape, dtype=np.uint8)
# bg_image[:] = BG_COLOR
output_image = np.where(condition, image, BG_COLOR)
output_image = cv2.cvtColor(output_image, cv2.COLOR_BGR2RGB)
output_image.flags.writeable = False
results = pose.process(output_image)
output_image.flags.writeable = True
output_image = cv2.cvtColor(output_image, cv2.COLOR_RGB2BGR)
try:
landmarks = results.pose_landmarks.landmark
counter, status= TypeOfMove(landmarks).calculate_exercise(game_type, counter, status)
# print('landmark')
if 4.4<time.time()-act_time<5.1:
[LA,RA,LL,RL,N,AB] = BodyPartAngle(landmarks).angle_of_the_left_arm(),BodyPartAngle(landmarks).angle_of_the_right_arm(),BodyPartAngle(landmarks).angle_of_the_left_leg(),BodyPartAngle(landmarks).angle_of_the_right_leg(),BodyPartAngle(landmarks).angle_of_the_neck(),BodyPartAngle(landmarks).angle_of_the_abdomen()
[LWRV,RWRV,LELV,RELV,LKNV,RKNV] = detection_body_part(landmarks, "LEFT_WRIST")[2],detection_body_part(landmarks, "RIGHT_WRIST")[2],detection_body_part(landmarks, "LEFT_ELBOW")[2],detection_body_part(landmarks, "RIGHT_ELBOW")[2],detection_body_part(landmarks, "LEFT_KNEE")[2],detection_body_part(landmarks, "RIGHT_KNEE")[2]
# print([LA,RA,LL,RL,N,AB])
# print([LWRV,RWRV,LELV,RELV,LANV,RANV])
if 4.6<time.time()-act_time<8:
test = max(abs(LA-BodyPartAngle(landmarks).angle_of_the_left_arm()),abs(RA-BodyPartAngle(landmarks).angle_of_the_right_arm()),abs(LL-BodyPartAngle(landmarks).angle_of_the_left_leg()),abs(RL-BodyPartAngle(landmarks).angle_of_the_right_leg()),abs(N-BodyPartAngle(landmarks).angle_of_the_neck()),abs(AB-BodyPartAngle(landmarks).angle_of_the_abdomen()))
Vtest = np.mean([LWRV,RWRV,LELV,RELV,LKNV,RKNV])
print(Vtest)
# print(test)
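# Freeze check: reference joint angles and landmark visibilities are captured
# about 4.4-5.1 s after a cue; between 4.6 and 8 s the round is lost if any
# angle drifts more than 10 degrees from its reference while the recorded mean
# visibility exceeds 0.7.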
if test > 10 and Vtest>0.7 :
game_over = True
over_time = time.time()
# print('final')
except:
pass
mp_drawing.draw_landmarks(
output_image,
results.pose_landmarks,
mp_pose.POSE_CONNECTIONS,
mp_drawing.DrawingSpec(color=(255, 255, 255),
thickness=2,
circle_radius=2),
mp_drawing.DrawingSpec(color=(174, 139, 45),
thickness=2,
circle_radius=2),
)
final_frame[5:305,670:1310] = RL_IMAGE
final_frame[315:795,670:1310] = output_image
final_frame[10:790,10:650] = L_IMAGE
ret, buffer = cv2.imencode('.jpg', final_frame)
frame = buffer.tobytes()
# game_status: 0=START, 1=LOSE, 2=WIN
with open('game.txt','w+') as f:
f.write(f"{game_status},{game_type},{int(round(counter))},{timer(start_time)},{total_use_time}"+'\n')
# stream webcam frames to the web page (the win/lose screens stall here, so they are handled separately below)
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
cap = cv2.VideoCapture('images/victory.mp4')
while cap.isOpened():
if cap.get(cv2.CAP_PROP_POS_FRAMES) <227 and game_status == 2:
ret , final_frame = cap.read()
final_frame = cv2.resize(final_frame,(1320,800),interpolation=cv2.INTER_LINEAR)
end_time = time.time()
elif game_status == 2:
vic_time = time.time()-end_time
img = cv2.imread('images/victory.png')
total_use_time = timer(start_time,end_time)
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img,f"Total time : {total_use_time}",(320,600),font,2,(80,127,255),3,cv2.LINE_AA)
h_center = int(img.shape[0]/2)
w_center = int(img.shape[1]/2)
vic_h = img.shape[0]
vic_w = img.shape[1]
ratio = min(1,(1+vic_time)/2) # originally /8
img = img[int(h_center-ratio*vic_h/2):int(h_center+ratio*vic_h/2),int(w_center-ratio*vic_w/2):int(w_center+ratio*vic_w/2)]
final_frame = cv2.resize(img,(1320,800),interpolation=cv2.INTER_LINEAR)
if ratio ==1:
game_status = 3
else:
dead_time = time.time()-over_time
img = cv2.imread('images/008.png')
h_center = int(img.shape[0]/2)
w_center = int(img.shape[1]/2)
dead_h = img.shape[0]
dead_w = img.shape[1]
ratio = min(1,(1+dead_time)/2) # originally /8
print(ratio)
img = img[int(h_center-ratio*dead_h/2):int(h_center+ratio*dead_h/2),int(w_center-ratio*dead_w/2):int(w_center+ratio*dead_w/2)]
final_frame = cv2.resize(img,(1320,800),interpolation=cv2.INTER_LINEAR)
ret, buffer = cv2.imencode('.jpg', final_frame)
frame = buffer.tobytes()
# game_status: 0=START, 1=LOSE, 2=WIN
with open('game.txt','w+') as f:
f.write(f"{game_status},{game_type},{int(round(counter))},{timer(start_time)},{total_use_time}"+'\n')
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
## dance game (game3) video feed (too much code; imported from dance_feed instead)
## Added by Zihao: run a subprocess from the command line
def run_win_cmd(cmd):
result = []
process = subprocess.Popen(cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
for line in process.stdout:
result.append(line)
errcode = process.returncode
for line in result:
print(line)
if errcode is not None:
raise Exception('cmd %s failed, see above for details', cmd)
return True
## Added by Zihao: virtual try-on video feed
def tryon_frames():
## music setting (if needed)
file = f"sounds/fashion.mp3"
pygame.mixer.init()
pygame.mixer.music.load(file)
soundon = 0
# ## drawing body
mp_drawing = mp.solutions.drawing_utils
mp_pose = mp.solutions.pose
## setting the video source
if args["video_source"] is not None:
cap = cv2.VideoCapture(args["video_source"])
else:
cap = cv2.VideoCapture(0,cv2.CAP_DSHOW) # webcam
w = 1600
h = 1200
cap.set(3, w) # width
cap.set(4, h) # height
# set up the initial environment
start_time = time.time()
env_list = tryon_start('tryon1')
counter = 0 # movement of exercise
tryon_status = 1
page = 0
## setup mediapipe
with mp_pose.Pose(min_detection_confidence=0.8,
min_tracking_confidence=0.8) as pose:
while cap.isOpened():
ret, frame = cap.read()
# result_screen = np.zeros((250, 400, 3), np.uint8)
frame = cv2.flip(frame,1)
frame = cv2.resize(frame, (w, h), interpolation=cv2.INTER_AREA)
## recolor frame to RGB
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame.flags.writeable = False
## make detection
results = pose.process(frame)
## recolor back to BGR
frame.flags.writeable = True
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
#================================================================
if tryon_status == 0:
start_time = time.time()
env_list = tryon_start('tryon1')
counter = 0 # movement of exercise
tryon_status = 1
elif tryon_status == 1:
# parameters returned to the system while the try-on is running
env_coordinate = tryon_play('tryon1',env_list,time.time()-start_time)
# draw those parameters onto the frame
frame = tryon_plot('tryon1',frame,env_coordinate)
#================================================================
try:
if soundon==0 :
pygame.mixer.music.play()
soundon = 1
start_time = time.time()
landmarks = results.pose_landmarks.landmark
total_choice = []
total_status = []
for i,env in enumerate(env_list):
counter, env.status , tryon_status,page = TypeOfTry(landmarks).calculate_exercise(
'tryon1', counter,env.status, env,w,h,tryon_status,0)
total_status.append(env.status)
total_choice.append(env.choice)
except:
total_status = []
total_choice = []
pass
# score_table('tryon1', counter, [str(x)[0] for x in total_status],[str(x)[0] for x in total_choice],timer(start_time))
tryon2start = time.time()
elif tryon_status == 2 :
try:
landmarks = results.pose_landmarks.landmark
if len(env_list):
[frame , tryon2start] = tryon2_plot('tryon1',frame,env_list,w,h,tryon2start)
else:
tryon_status = 3
path="VITON-HD/datasets/play/cloth"
[product_list,env_list] = add_product(path)
start_time = time.time()
page = 0
max_page = math.ceil(len(product_list)/4)
except:
pass
elif tryon_status == 3:
work_list = []
if page > max_page:
page = max_page
elif page < 0:
page = 0
work_list.extend(product_list[page*4:page*4+4])
work_list.extend(env_list)
env_coordinate = tryon_play('tryon1',work_list,time.time()-start_time)
frame = tryon_plot('tryon1',frame,env_coordinate,path)
try:
landmarks = results.pose_landmarks.landmark
for i,env in enumerate(work_list):
counter, env.status , tryon_status, page = TypeOfTry(landmarks).calculate_exercise(
'tryon1', counter,env.status, env,w,h,tryon_status,page)
except:
pass
elif tryon_status == 4:
with open('product.txt','w+') as f:
for obj in product_list:
if obj.choice:
filename = obj.position.split('-')[-1]
f.writelines(f"{filename}.jpg"+'\n')
break
## render detections (for landmarks)
# mp_drawing.draw_landmarks(
# frame,
# results.pose_landmarks,
# mp_pose.POSE_CONNECTIONS,
# mp_drawing.DrawingSpec(color=(255, 255, 255),
# thickness=2,
# circle_radius=2),
# mp_drawing.DrawingSpec(color=(174, 139, 45),
# thickness=2,
# circle_radius=2),
# )
ret, buffer = cv2.imencode('.jpg', frame)
frame = buffer.tobytes()
# frame = frame.tobytes()
with open('tryon.txt','w+') as f:
f.write(f"True")
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
t = threading.Thread(target=run_win_cmd,args=(f'python subpro.py',))
t.start()
cap = cv2.VideoCapture('videos/wait.mp4')
while cap.isOpened():
ret, frame = cap.read()
ret, buffer = cv2.imencode('.jpg', frame)
frame = buffer.tobytes()
# frame = frame.tobytes()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
if not t.is_alive():
break
t.join()
final_path ='VITON-HD/results/play'
final_file = [f"{final_path}/{file}" for file in os.listdir(final_path)]
total_pics = len(final_file)
cycle_test = total_pics // 8 # how many complete collage layouts there will be
remain_test = total_pics % 8 # how many leftover images do not fill a layout
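# Layout math: one full collage board is 2202 px wide
# (32 margin + 768 column + 32 + 570 column + 32 + 768 column = 2202);
# when there are fewer than 8 images, a single 2600 px screen is reserved instead.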
if cycle_test<1:
total_w = 2600
else:
if remain_test==0:
# add the trailing 32 px margin
total_w = cycle_test*2202+32
elif remain_test < 4:
# add the leading 32 px, the first column width (768), and the trailing 32 px
total_w = cycle_test*2202+32+768+32
elif remain_test < 6:
# also add the middle column width and trailing margin (570 + 32)
total_w = cycle_test*2202+32+768+32+570+32
else:
# a full extra collage board (2202) plus the trailing 32 px
total_w = cycle_test*2202+2202+32
total_h = 1620
final_frame = np.ones((total_h,total_w,3),dtype=np.uint8)*30 # dark-gray canvas (np.zeros()*30 would stay black)
for i,show in enumerate(final_file):
i_cycle_test = (i+1)//8
i_remain_test = (i+1)%8
image = cv2.imread(show)
if i_remain_test % 8 == 1:
final_frame[30:1054,(i_cycle_test*2202+32):(32+i_cycle_test*2202)+768] = image
elif i_remain_test % 8 == 2:
image = cv2.resize(image,(364,486),interpolation=cv2.INTER_AREA)
final_frame[(30+1024+50):(30+1024+50)+486,(i_cycle_test*2202+32):(32+i_cycle_test*2202)+364] = image
elif i_remain_test % 8 == 3:
image = cv2.resize(image,(364,486),interpolation=cv2.INTER_AREA)
final_frame[(30+1024+50):(30+1024+50)+486,(i_cycle_test*2202+32+364+40):(32+i_cycle_test*2202+364+40)+364] = image
elif i_remain_test % 8 == 4:
image = cv2.resize(image,(570,760),interpolation=cv2.INTER_AREA)
final_frame[30:30+760,(i_cycle_test*2202+32+768+32):(i_cycle_test*2202+32+768+32)+570] = image
elif i_remain_test % 8 == 5:
image = cv2.resize(image,(570,760),interpolation=cv2.INTER_AREA)
final_frame[(30+760+40):(30+760+40)+760,(i_cycle_test*2202+32+768+32):(i_cycle_test*2202+32+768+32)+570] = image
elif i_remain_test % 8 == 6 :
final_frame[(30+486+50):(30+486+50)+1024,(i_cycle_test*2202+32+768+32+570+32):(i_cycle_test*2202+32+768+32+570+32)+768] = image
elif i_remain_test % 8 == 7 :
image = cv2.resize(image,(364,486),interpolation=cv2.INTER_AREA)
final_frame[30:(30+486),(i_cycle_test*2202+32+768+32+570+32):(i_cycle_test*2202+32+768+32+570+32)+364] = image
elif i_remain_test % 8 == 0 :
image = cv2.resize(image,(364,486),interpolation=cv2.INTER_AREA)
final_frame[30:(30+486),((i_cycle_test-1)*2202+32+768+32+570+32+364+32):((i_cycle_test-1)*2202+32+768+32+570+32+364+32)+364] = image
fn = show.split('/')[-1]
os.rename(show,f'history_output\{fn}')
display_start = time.time()
scroll = 80
end_flag = 0
end_time = display_start+500
outputname = round(time.time())
cv2.imwrite(f'history_output\output{outputname}.jpg',final_frame)
while True:
now_time = time.time()-display_start
now_w = int(now_time*scroll)
right_w = min(total_w,now_w+2600)
if right_w== total_w:
now_w = right_w-2600
if end_flag == 0 :
end_time = time.time()
end_flag = 1
show_frame = final_frame[:,now_w:right_w]
show_frame = cv2.resize(show_frame,(1600,1024),interpolation=cv2.INTER_AREA)
ret, buffer = cv2.imencode('.jpg', show_frame)
frame = buffer.tobytes()
# frame = frame.tobytes()
with open('tryon.txt','w+') as f:
if time.time()-end_time>5:
test_product = []
for obj in product_list:
if obj.choice:
filename = obj.position.split('-')[-1]
test_product.append(f"{filename}.jpg")
pro_str = ','.join(test_product)
f.write(f"False,{pro_str}")
else:
f.write(f"True")
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
## ==== Flask routes ====
app.config['PERMANENT_SESSION_LIFETIME'] = timedelta(hours=8)
## login page
@app.route('/login', methods=['GET', 'POST']) # supports GET and POST requests
def login():
if request.method == 'POST':
email = request.form['email']
password = request.form['password'] #.encode('utf-8')
if email == "abc@gmail.com":
user = "user"
if user == None :
# return "no such account"
flash('沒有這個帳號')
return render_template("login.html")
if len(user) != 0:
if password == '12345':
session['name'] = 'abc'
session['email'] = 'abc@gmail.com'
return render_template("index_3D.html")
else:
# return "wrong password"
flash('您的密碼錯誤')
return render_template("login.html")
# temporary code below
else:
flash('沒有這個帳號')
return render_template("login.html")
else:
return render_template("login.html")
## main menu
@app.route('/')
def index():
# stop any playing audio first
pygame.mixer.init()
pygame.mixer.music.stop()
# session['name'] = False
username = session.get('name') # read from the session
if username == "abc":
return render_template('index_3D.html')
else:
return redirect('/login')
#return render_template('index.html')
## control page video feed
@app.route('/video_feed')
def video_feed():
# stop any playing audio first
pygame.mixer.init()
pygame.mixer.music.stop()
return Response(control_frames(), mimetype='multipart/x-mixed-replace; boundary=frame')
## exercise page
@app.route('/fitness/<string:exercise_type>')
def fitness(exercise_type):
# clear the status file
with open('fitness.txt','w+') as f:
f.write(f""+'\n')
return render_template('fitness.html',exercise_type=exercise_type)
## game page
@app.route('/game/<string:game_type>')
def game(game_type):
# clear the status file
with open('game.txt','w+') as f:
f.write(f""+'\n')
return render_template('game.html',game_type=game_type)
## Added by Zihao: try-on page
@app.route('/tryon_stage')
def tryon_stage():
# clear the status file
with open('tryon.txt','w+') as f:
f.write(f"True,"+'\n')
return render_template('tryon_stage.html',title = 'tryon_feed')
## exercise page video feed
@app.route('/fitness_feed/<string:exercise_type>')
def fitness_feed(exercise_type):
# stop any playing audio first
pygame.mixer.init()
pygame.mixer.music.stop()
return Response(fitness_frames(exercise_type), mimetype='multipart/x-mixed-replace; boundary=frame')
## game page video feed
@app.route('/games_feed/<string:game_type>')
def games_feed(game_type):
if game_type=='game1':
# stop any playing audio first
pygame.mixer.init()
pygame.mixer.music.stop()
return Response(games_frames(game_type), mimetype='multipart/x-mixed-replace; boundary=frame')
elif game_type=='game2':
# stop any playing audio first
pygame.mixer.init()
pygame.mixer.music.stop()
return Response(game2_frames(game_type), mimetype='multipart/x-mixed-replace; boundary=frame')
elif game_type=="game3":
pygame.mixer.init()
pygame.mixer.music.stop()
return Response(game3_frames(game_type), mimetype='multipart/x-mixed-replace; boundary=frame')
## Added by Zihao: try-on page video feed
@app.route('/tryon_feed')
def tryon_feed():
# stop any playing audio first
pygame.mixer.init()
pygame.mixer.music.stop()
return Response(tryon_frames(), mimetype='multipart/x-mixed-replace; boundary=frame')
## fitness menu
@app.route('/sport')
def sport():
# stop any playing audio first
pygame.mixer.init()
pygame.mixer.music.stop()
return render_template('sport_3D.html')
## game menu
@app.route('/game_menu')
def game_menu():
# stop any playing audio first
pygame.mixer.init()
pygame.mixer.music.stop()
return render_template('game_menu_3D.html')
# ## test menu
# @app.route('/test')
# def test():
# return render_template('test.html',title = 'fitness_feed/squat')
## fitness status text feed
@app.route('/status_feed')
def status_feed():
def generate():
with open('fitness.txt','r') as f:
yield f.read() # return also will work
return Response(generate(), mimetype='text')
## game status text feed
@app.route('/game_status_feed')
def game_status_feed():
def generate():
with open('game.txt','r') as f:
yield f.read() # return also will work
return Response(generate(), mimetype='text')
## try-on status text feed
@app.route('/tryon_status_feed')
def tryon_status_feed():
def generate():
with open('tryon.txt','r') as f:
yield f.read() # return also will work
return Response(generate(), mimetype='text')
## logout
@app.route('/logout')
def logout():
session.clear()
return redirect('/')
if __name__=='__main__':
import argparse
app.run(debug=True)
|
wheelchair.py
|
#NAME: wheelchair.py
#DATE: 05/08/2019
#AUTH: Ryan McCartney
#DESC: A python class for moving an entity in real-time via and http API
#COPY: Copyright 2019, All Rights Reserved, Ryan McCartney
import threading
import time
import json
import requests
import random
from requests import Session
#define threading wrapper
def threaded(fn):
def wrapper(*args, **kwargs):
thread = threading.Thread(target=fn, args=args, kwargs=kwargs)
thread.start()
return thread
return wrapper
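#Note: a method decorated with @threaded returns its started Thread object
#immediately; a caller (hypothetical example) can keep the handle if it ever
#needs to join it, e.g. statusThread = chair.getStatus().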
class Wheelchair:
debug = False
logFilePath = "logs/log.txt"
header = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.101 Safari/537.36'}
def __init__(self,config):
self.logging = True
self.stopped = False
self.wheels = config["wheels"]
self.port = config["port"]
self.ipAddress = config["ipAddress"]
self.baseURL = "http://"+str(self.ipAddress)+":"+str(self.port)+"/"
self.error = False
self.timeout = 2 #Seconds
self.pollingStatus = False
self.topSpeed = config["topSpeed"]
self.maxSpeed = config["maxSpeed"]
#Status Flags and Telemetry Variables
self.movementFlag = [0]*self.wheels
self.distance = [0]*self.wheels
self.current = [0]*self.wheels
self.voltage = 0.0
try:
self.session = requests.session()
self.clearLogs()
self.connected = True
except:
self.log("ERROR: Cannot create a session.")
self.connected = False
#Start capturing status packets
self.getStatus()
#Logging Function
def log(self, entry):
currentDateTime = time.strftime("%d/%m/%Y %H:%M:%S")
logEntry = currentDateTime + ": " + entry
if self.logging == True:
#open a txt file to use for logging
logFile = open(self.logFilePath,"a+")
logFile.write(logEntry+"\n")
logFile.close()
print(logEntry)
#Send and Receive Messages with implemented logging
def sendCommand(self, command):
#Start Timing
start = time.time()
#combine with host address
message = self.baseURL + "send?command=" + command
message = message.encode('ascii')
if self.pollingStatus == False:
self.getStatus()
try:
if self.debug == True:
response = self.session.get(message,timeout=self.timeout)
status = response.content.decode("utf-8").split("\n")
self.log("INFO: Transmission response code is "+str(response.status_code))
end = time.time()
self.log("STATUS: Sending '"+str(command)+"' took "+str(round((end-start),2))+" seconds.")
self.log(status[0])
else:
self.session.get(message,timeout=self.timeout)
self.connected = True
except:
self.log("ERROR: Could not access API.")
self.connected = False
@threaded
def getStatus(self):
while self.connected:
self.pollingStatus = True
try:
message = self.baseURL + "getLatest"
response = self.session.get(message,timeout=self.timeout)
status = response.content.decode("utf-8").split("\n")
#Extract Joint Positions
if(status[0].find("STATUS:")!=-1):
if(status[0].find("MOVEMENT") != -1):
data = status[0].split(",")
self.movementFlag = data
elif(status[0].find("DISTANCE") != -1):
data = status[0].split(",")
self.distance = data
elif(status[0].find("TELEMETRY") != -1):
data = status[0].split(",")
self.voltage = data.pop(0)
self.current = data
else:
self.log("FAILED TO PARSE: "+status[0])
elif(status[0] !=""):
if self.debug:
self.log(status[0])
except:
self.log("INFO: Did not receive status response from API.")
self.pollingStatus = False
def leftThrottle(self,speed):
self.stopped = False
if (speed <= 255) and (speed >= -255):
command = "t,l,"+str(speed)
self.sendCommand(command)
if self.debug:
self.log("INFO: Left wheel speed set to "+str(speed)+".")
else:
self.log("ERROR: Speed out of range.")
def rightThrottle(self,speed):
self.stopped = False
if (speed <= 255) and (speed >= -255):
command = "t,r,"+str(speed)
self.sendCommand(command)
if self.debug:
self.log("INFO: Right wheel speed set to "+str(speed)+".")
else:
self.log("ERROR: Speed out of range.")
def getWheelMovement(self):
return self.movementFlag
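#NOTE: setAccel and moveAtSpeed below call self.getWheel, which is not defined
#in this file. A minimal sketch of what it presumably does (an assumption, not
#the original implementation):
#    def getWheel(self, wheel):
#        if str(wheel).lower() in ("l", "left"):
#            return "l"
#        if str(wheel).lower() in ("r", "right"):
#            return "r"
#        return "error"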
def setAccel(self,wheel,accel):
wheelID = self.getWheel(wheel)
if wheelID != "error":
command = "z"+str(wheelID)+str(round(accel, 2))
self.sendCommand(command)
self.log("INFO: "+str(wheel)+ " wheel acceleration rate adjusted to "+str(round(accel, 2))+" mm per second squared.")
def setBaseSpeed(self,speed):
command = "s,b,"+str(round(speed, 2))
self.sendCommand(command)
self.log("INFO: Base wheel speed adjusted to "+str(round(speed, 2))+" mm/s.")
def moveAtSpeed(self,wheel,speed):
wheelID = self.getWheel(wheel)
if (wheelID == "l") or (wheelID == "r"):
command = "s"+str(wheelID)+str(round(speed, 2))
self.sendCommand(command)
self.log("INFO: "+str(wheel)+ " wheel speed set to "+str(round(speed, 2))+" mm/s.")
def stop(self):
self.leftThrottle(0)
self.rightThrottle(0)
self.log("INFO: Wheelchair stopped and brakes applied.")
def softStop(self):
if not self.stopped:
self.leftThrottle(1)
self.rightThrottle(1)
self.stopped = True
if self.debug:
self.log("INFO: Wheelchair stopped, brakes not applied.")
def eStop(self):
self.sendCommand("q")
self.log("INFO: Arm Emergency Stopped.")
def checkConnection(self):
self.sendCommand("test")
self.log("INFO: Testing the connection.")
def clearLogs(self):
url = self.baseURL + "clearLogs"
response = self.session.get(url,timeout=self.timeout)
if response.content.decode("utf-8"):
self.log(response.content.decode("utf-8"))
def resetSerial(self):
messages = ["disconnect","connect"]
for message in messages:
url = self.baseURL + message
response = self.session.get(url,timeout=self.timeout)
if response.content.decode("utf-8"):
self.log(response.content.decode("utf-8"))
time.sleep(1.5)
self.log("INFO: Arm Reset.")
def resetArduino(self):
self.sendCommand("r")
time.sleep(1)
self.log("INFO: Emergency Stop Latch Reset.")
|
dbt_integration_test.py
|
#
# MIT License
#
# Copyright (c) 2020 Airbyte
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import json
import os
import random
import re
import shutil
import socket
import string
import subprocess
import sys
import threading
from typing import Any, Dict, List
from normalization.destination_type import DestinationType
from normalization.transform_config.transform import TransformConfig
class DbtIntegrationTest(object):
def __init__(self):
self.target_schema = "test_normalization"
self.container_prefix = f"test_normalization_db_{self.random_string(3)}"
self.db_names = ["postgres", "mysql"]
@staticmethod
def random_string(length: int) -> str:
return "".join(random.choice(string.ascii_lowercase) for i in range(length))
def setup_db(self):
self.setup_postgres_db()
self.setup_mysql_db()
def setup_postgres_db(self):
print("Starting localhost postgres container for tests")
port = self.find_free_port()
config = {
"host": "localhost",
"username": "integration-tests",
"password": "integration-tests",
"port": port,
"database": "postgres",
"schema": self.target_schema,
}
commands = [
"docker",
"run",
"--rm",
"--name",
f"{self.container_prefix}_postgres",
"-e",
f"POSTGRES_USER={config['username']}",
"-e",
f"POSTGRES_PASSWORD={config['password']}",
"-p",
f"{config['port']}:5432",
"-d",
"postgres",
]
print("Executing: ", " ".join(commands))
subprocess.call(commands)
if not os.path.exists("../secrets"):
os.makedirs("../secrets")
with open("../secrets/postgres.json", "w") as fh:
fh.write(json.dumps(config))
def setup_mysql_db(self):
print("Starting localhost mysql container for tests")
port = self.find_free_port()
config = {
"type": "mysql",
"host": "localhost",
"port": port,
"database": self.target_schema,
"username": "root",
"password": "",
}
commands = [
"docker",
"run",
"--rm",
"--name",
f"{self.container_prefix}_mysql",
"-e",
"MYSQL_ALLOW_EMPTY_PASSWORD=yes",
"-e",
"MYSQL_INITDB_SKIP_TZINFO=yes",
"-e",
f"MYSQL_DATABASE={config['database']}",
"-p",
f"{config['port']}:3306",
"-d",
"mysql",
]
print("Executing: ", " ".join(commands))
subprocess.call(commands)
if not os.path.exists("../secrets"):
os.makedirs("../secrets")
with open("../secrets/mysql.json", "w") as fh:
fh.write(json.dumps(config))
@staticmethod
def find_free_port():
"""
Find an unused port to create a database listening on localhost to run destination-postgres
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("", 0))
addr = s.getsockname()
s.close()
return addr[1]
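# Note: the socket is closed before Docker binds the chosen port, so in rare
# cases another process can claim the port in that window before the container
# starts.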
def tear_down_db(self):
for db_name in self.db_names:
print(f"Stopping localhost {db_name} container for tests")
try:
subprocess.call(["docker", "kill", f"{self.container_prefix}_{db_name}"])
except Exception as e:
print(f"WARN: Exception while shutting down {db_name}: {e}")
@staticmethod
def change_current_test_dir(request):
# This makes the test run whether it is executed from the tests folder (with pytest/gradle)
# or from the base-normalization folder (through pycharm)
integration_tests_dir = os.path.join(request.fspath.dirname, "integration_tests")
if os.path.exists(integration_tests_dir):
os.chdir(integration_tests_dir)
else:
os.chdir(request.fspath.dirname)
def generate_profile_yaml_file(self, destination_type: DestinationType, test_root_dir: str) -> Dict[str, Any]:
"""
Each destination requires different settings to connect to. This step generates the adequate profiles.yml
as described here: https://docs.getdbt.com/reference/profiles.yml
"""
config_generator = TransformConfig()
profiles_config = config_generator.read_json_config(f"../secrets/{destination_type.value.lower()}.json")
# Adapt credential file to look like destination config.json
if destination_type.value == DestinationType.BIGQUERY.value:
profiles_config["credentials_json"] = json.dumps(profiles_config)
profiles_config["dataset_id"] = self.target_schema
else:
profiles_config["schema"] = self.target_schema
profiles_yaml = config_generator.transform(destination_type, profiles_config)
config_generator.write_yaml_config(test_root_dir, profiles_yaml)
return profiles_config
@staticmethod
def run_destination_process(message_file: str, test_root_dir: str, commands: List[str]):
print("Executing: ", " ".join(commands))
with open(os.path.join(test_root_dir, "destination_output.log"), "ab") as f:
process = subprocess.Popen(commands, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
def writer():
if os.path.exists(message_file):
with open(message_file, "rb") as input_data:
while True:
line = input_data.readline()
if not line:
break
process.stdin.write(line)
process.stdin.close()
thread = threading.Thread(target=writer)
thread.start()
for line in iter(process.stdout.readline, b""):
f.write(line)
sys.stdout.write(line.decode("utf-8"))
thread.join()
process.wait()
return process.returncode == 0
def dbt_run(self, test_root_dir: str):
"""
Run the dbt CLI to perform transformations on the test raw data in the destination
"""
# Perform sanity check on dbt project settings
assert self.run_check_dbt_command("debug", test_root_dir)
assert self.run_check_dbt_command("deps", test_root_dir)
final_sql_files = os.path.join(test_root_dir, "final")
shutil.rmtree(final_sql_files, ignore_errors=True)
# Compile dbt models files into destination sql dialect, then run the transformation queries
assert self.run_check_dbt_command("run", test_root_dir)
@staticmethod
def run_check_dbt_command(command: str, cwd: str) -> bool:
"""
Run dbt subprocess while checking and counting for "ERROR", "FAIL" or "WARNING" printed in its outputs
"""
error_count = 0
commands = [
"docker",
"run",
"--rm",
"--init",
"-v",
f"{cwd}:/workspace",
"-v",
f"{cwd}/build:/build",
"-v",
f"{cwd}/final:/build/run/airbyte_utils/models/generated",
"-v",
"/tmp:/tmp",
"--network",
"host",
"--entrypoint",
"/usr/local/bin/dbt",
"-i",
"airbyte/normalization:dev",
command,
"--profiles-dir=/workspace",
"--project-dir=/workspace",
]
print("Executing: ", " ".join(commands))
print(f"Equivalent to: dbt {command} --profiles-dir={cwd} --project-dir={cwd}")
with open(os.path.join(cwd, "dbt_output.log"), "ab") as f:
process = subprocess.Popen(commands, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=os.environ)
for line in iter(lambda: process.stdout.readline(), b""):
f.write(line)
str_line = line.decode("utf-8")
sys.stdout.write(str_line)
# keywords to match lines as signaling errors
if "ERROR" in str_line or "FAIL" in str_line or "WARNING" in str_line:
# exception keywords in lines to ignore as errors (such as summary or expected warnings)
is_exception = False
for except_clause in [
"Done.", # DBT Summary
"PASS=", # DBT Summary
"Nothing to do.", # When no schema/data tests are setup
"Configuration paths exist in your dbt_project.yml", # When no cte / view are generated
]:
if except_clause in str_line:
is_exception = True
break
if not is_exception:
# count lines signaling an error/failure/warning
error_count += 1
process.wait()
message = (
f"{' '.join(commands)}\n\tterminated with return code {process.returncode} "
f"with {error_count} 'Error/Warning/Fail' mention(s)."
)
print(message)
assert error_count == 0, message
assert process.returncode == 0, message
if error_count > 0:
return False
return process.returncode == 0
@staticmethod
def copy_replace(src, dst, pattern=None, replace_value=None):
"""
Copies a file from src to dst replacing pattern by replace_value
Parameters
----------
src : string
Path to the source filename to copy from
dst : string
Path to the output filename to copy to
pattern
list of Patterns to replace inside the src file
replace_value
list of Values to replace by in the dst file
"""
file1 = open(src, "r") if isinstance(src, str) else src
file2 = open(dst, "w") if isinstance(dst, str) else dst
pattern = [pattern] if isinstance(pattern, str) else pattern
replace_value = [replace_value] if isinstance(replace_value, str) else replace_value
if replace_value and pattern:
if len(replace_value) != len(pattern):
raise Exception("Invalid parameters: pattern and replace_value" " have different sizes.")
rules = [(re.compile(regex, re.IGNORECASE), value) for regex, value in zip(pattern, replace_value)]
else:
rules = []
for line in file1:
if rules:
for rule in rules:
line = re.sub(rule[0], rule[1], line)
file2.write(line)
if isinstance(src, str):
file1.close()
if isinstance(dst, str):
file2.close()
|
minion.py
|
# -*- coding: utf-8 -*-
'''
Routines to set up a minion
'''
# Import python libs
import logging
import getpass
import multiprocessing
import fnmatch
import copy
import os
import hashlib
import re
import threading
import time
import traceback
import sys
import signal
from random import randint
# Import third party libs
try:
import zmq
except ImportError:
# Running in local, zmq not needed
pass
import yaml
HAS_RANGE = False
try:
import seco.range
HAS_RANGE = True
except ImportError:
pass
# Import salt libs
from salt.exceptions import (
AuthenticationError, CommandExecutionError, CommandNotFoundError,
SaltInvocationError, SaltReqTimeoutError, SaltClientError
)
import salt.client
import salt.crypt
import salt.loader
import salt.utils
import salt.payload
import salt.utils.schedule
import salt.utils.minions
import salt.utils.network
import salt.pillar
# TODO: should probably use _getargs() from salt.utils?
from salt.state import _getargs
from salt._compat import string_types
from salt.utils.debug import enable_sigusr1_handler
log = logging.getLogger(__name__)
# To set up a minion:
# 1. Read in the configuration
# 2. Generate the function mapping dict
# 3. Authenticate with the master
# 4. Store the AES key
# 5. Connect to the publisher
# 6. Handle publications
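# A rough usage sketch of the flow above (illustrative only; a real deployment
# builds ``opts`` from /etc/salt/minion, e.g. via salt.config.minion_config,
# and runs this through the ``salt-minion`` entry point):
#
#     opts = salt.config.minion_config('/etc/salt/minion')
#     minion = Minion(opts)
#     minion.tune_in()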
def resolve_dns(opts):
'''
Resolves the master_ip and master_uri options
'''
ret = {}
check_dns = True
if opts.get('file_client', 'remote') == 'local' and check_dns:
check_dns = False
if check_dns is True:
# Because I import salt.log below I need to re-import salt.utils here
import salt.utils
try:
ret['master_ip'] = salt.utils.dns_check(opts['master'], True, opts['ipv6'])
except SaltClientError:
if opts['retry_dns']:
while True:
import salt.log
msg = ('Master hostname: {0} not found. Retrying in {1} '
'seconds').format(opts['master'], opts['retry_dns'])
if salt.log.is_console_configured():
                        log.warning(msg)
else:
print('WARNING: {0}'.format(msg))
time.sleep(opts['retry_dns'])
try:
ret['master_ip'] = salt.utils.dns_check(
opts['master'], True, opts['ipv6']
)
break
except SaltClientError:
pass
else:
ret['master_ip'] = '127.0.0.1'
else:
ret['master_ip'] = '127.0.0.1'
ret['master_uri'] = 'tcp://{ip}:{port}'.format(ip=ret['master_ip'],
port=opts['master_port'])
return ret
def get_proc_dir(cachedir):
'''
Return the directory that process data is stored in
'''
fn_ = os.path.join(cachedir, 'proc')
if not os.path.isdir(fn_):
# proc_dir is not present, create it
os.makedirs(fn_)
else:
# proc_dir is present, clean out old proc files
for proc_fn in os.listdir(fn_):
os.remove(os.path.join(fn_, proc_fn))
return fn_
def parse_args_and_kwargs(func, args, data=None):
'''
Detect the args and kwargs that need to be passed to a function call,
    and yamlify all arguments and keyword argument values if:
- they are strings
- they do not contain '\n'
If yamlify results in a dict, and the original argument or kwarg value
did not start with a "{", then keep the original string value.
This is to prevent things like 'echo "Hello: world"' to be parsed as
dictionaries.
'''
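    # Illustrative example (argument values are made up, and assume ``func``
    # accepts **kwargs or has a ``refresh`` parameter): calling
    #   parse_args_and_kwargs(func, ['1', 'refresh=True', 'pkgs={"vim": "7.4"}'])
    # yields _args == [1] and kwargs == {'refresh': True, 'pkgs': {'vim': '7.4'}},
    # because every value is run through yamlify_arg below.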
spec_args, _, has_kwargs, _ = salt.state._getargs(func)
_args = []
kwargs = {}
for arg in args:
if isinstance(arg, string_types):
arg_name, arg_value = salt.utils.parse_kwarg(arg)
if arg_name:
if has_kwargs or arg_name in spec_args:
kwargs[arg_name] = yamlify_arg(arg_value)
continue
else:
# Not a kwarg
pass
_args.append(yamlify_arg(arg))
if has_kwargs and isinstance(data, dict):
# this function accepts kwargs, pack in the publish data
for key, val in data.items():
kwargs['__pub_{0}'.format(key)] = val
return _args, kwargs
def yamlify_arg(arg):
'''
yaml.safe_load the arg unless it has a newline in it.
'''
try:
original_arg = arg
if isinstance(arg, string_types):
if '\n' not in arg:
arg = yaml.safe_load(arg)
if isinstance(arg, dict):
# dicts must be wrapped in curly braces
if (isinstance(original_arg, string_types) and
not original_arg.startswith("{")):
return original_arg
else:
return arg
elif isinstance(arg, (int, list, string_types)):
return arg
else:
# we don't support this type
return str(original_arg)
except Exception:
# In case anything goes wrong...
return str(original_arg)
class SMinion(object):
'''
Create an object that has loaded all of the minion module functions,
grains, modules, returners etc. The SMinion allows developers to
generate all of the salt minion functions and present them with these
functions for general use.
'''
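    # Rough usage sketch (``opts`` is assumed to be a fully populated minion
    # config dict, e.g. built with salt.config.minion_config):
    #
    #     sminion = SMinion(opts)
    #     sminion.functions['test.ping']()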
def __init__(self, opts):
# Generate all of the minion side components
self.opts = opts
        # Late setup of the opts grains, so we can log from the grains
        # module
opts['grains'] = salt.loader.grains(opts)
self.opts = opts
if self.opts.get('file_client', 'remote') == 'remote':
if isinstance(self.opts['master'], list):
masters = self.opts['master']
self.opts['_auth_timeout'] = 3
self.opts['_safe_auth'] = False
for master in masters:
self.opts['master'] = master
self.opts.update(resolve_dns(opts))
try:
self.gen_modules()
break
except SaltClientError:
log.warning(('Attempted to authenticate with master '
'{0} and failed'.format(master)))
continue
else:
self.opts.update(resolve_dns(opts))
self.gen_modules()
else:
self.gen_modules()
def gen_modules(self):
'''
Load all of the modules for the minion
'''
self.opts['pillar'] = salt.pillar.get_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
).compile_pillar()
self.functions = salt.loader.minion_mods(self.opts)
self.returners = salt.loader.returners(self.opts, self.functions)
self.states = salt.loader.states(self.opts, self.functions)
self.rend = salt.loader.render(self.opts, self.functions)
self.matcher = Matcher(self.opts, self.functions)
self.functions['sys.reload_modules'] = self.gen_modules
class MasterMinion(object):
'''
Create a fully loaded minion function object for generic use on the
master. What makes this class different is that the pillar is
omitted, otherwise everything else is loaded cleanly.
'''
def __init__(
self,
opts,
returners=True,
states=True,
rend=True,
matcher=True,
whitelist=None):
self.opts = opts
self.whitelist = whitelist
self.opts['grains'] = salt.loader.grains(opts)
self.opts['pillar'] = {}
self.mk_returners = returners
self.mk_states = states
self.mk_rend = rend
self.mk_matcher = matcher
self.gen_modules()
def gen_modules(self):
'''
Load all of the modules for the minion
'''
self.functions = salt.loader.minion_mods(
self.opts,
whitelist=self.whitelist)
if self.mk_returners:
self.returners = salt.loader.returners(self.opts, self.functions)
if self.mk_states:
self.states = salt.loader.states(self.opts, self.functions)
if self.mk_rend:
self.rend = salt.loader.render(self.opts, self.functions)
if self.mk_matcher:
self.matcher = Matcher(self.opts, self.functions)
self.functions['sys.reload_modules'] = self.gen_modules
class MultiMinion(object):
'''
Create a multi minion interface, this creates as many minions as are
defined in the master option and binds each minion object to a respective
master.
'''
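    # This expects a list-valued ``master`` option, e.g. a minion config
    # fragment like (hostnames are illustrative):
    #
    #     master:
    #       - master1.example.com
    #       - master2.example.com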
def __init__(self, opts):
self.opts = opts
def _gen_minions(self):
'''
Set up and tune in the minion options
'''
if not isinstance(self.opts['master'], list):
log.error(
'Attempting to start a multimaster system with one master')
return False
minions = []
for master in set(self.opts['master']):
s_opts = copy.copy(self.opts)
s_opts['master'] = master
try:
minions.append(Minion(s_opts, 5, False))
except SaltClientError:
minions.append(s_opts)
return minions
def minions(self):
'''
Return a list of minion generators bound to the tune_in method
'''
ret = {}
minions = self._gen_minions()
for minion in minions:
if isinstance(minion, dict):
ret[minion['master']] = minion
else:
ret[minion.opts['master']] = {
'minion': minion,
'generator': minion.tune_in_no_block()}
return ret
# Multi Master Tune In
def tune_in(self):
'''
Bind to the masters
'''
# Prepare the minion event system
#
# Start with the publish socket
self.context = zmq.Context()
id_hash = hashlib.md5(self.opts['id']).hexdigest()
epub_sock_path = os.path.join(
self.opts['sock_dir'],
'minion_event_{0}_pub.ipc'.format(id_hash)
)
epull_sock_path = os.path.join(
self.opts['sock_dir'],
'minion_event_{0}_pull.ipc'.format(id_hash)
)
self.epub_sock = self.context.socket(zmq.PUB)
if self.opts.get('ipc_mode', '') == 'tcp':
epub_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts['tcp_pub_port']
)
epull_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts['tcp_pull_port']
)
else:
epub_uri = 'ipc://{0}'.format(epub_sock_path)
salt.utils.check_ipc_path_max_len(epub_uri)
epull_uri = 'ipc://{0}'.format(epull_sock_path)
salt.utils.check_ipc_path_max_len(epull_uri)
log.debug(
'{0} PUB socket URI: {1}'.format(
self.__class__.__name__, epub_uri
)
)
log.debug(
'{0} PULL socket URI: {1}'.format(
self.__class__.__name__, epull_uri
)
)
# Create the pull socket
self.epull_sock = self.context.socket(zmq.PULL)
# Bind the event sockets
self.epub_sock.bind(epub_uri)
self.epull_sock.bind(epull_uri)
# Restrict access to the sockets
if self.opts.get('ipc_mode', '') != 'tcp':
os.chmod(
epub_sock_path,
448
)
os.chmod(
epull_sock_path,
448
)
self.epoller = zmq.Poller()
module_refresh = False
pillar_refresh = False
# Prepare the minion generators
minions = self.minions()
loop_interval = int(self.opts['loop_interval'])
last = time.time()
auth_wait = self.opts['acceptance_wait_time']
max_wait = auth_wait * 6
while True:
for minion in minions.values():
if isinstance(minion, dict):
continue
if not hasattr(minion, 'schedule'):
continue
try:
minion.schedule.eval()
# Check if scheduler requires lower loop interval than
# the loop_interval setting
if minion.schedule.loop_interval < loop_interval:
loop_interval = minion.schedule.loop_interval
log.debug(
'Overriding loop_interval because of scheduled jobs.'
)
except Exception as exc:
log.error(
'Exception {0} occurred in scheduled job'.format(exc)
)
break
if self.epoller.poll(1):
try:
while True:
package = self.epull_sock.recv(zmq.NOBLOCK)
if package.startswith('module_refresh'):
module_refresh = True
elif package.startswith('pillar_refresh'):
pillar_refresh = True
self.epub_sock.send(package)
except Exception:
pass
# get commands from each master
for master, minion in minions.items():
if 'generator' not in minion:
if time.time() - auth_wait > last:
last = time.time()
if auth_wait < max_wait:
auth_wait += auth_wait
try:
if not isinstance(minion, dict):
minions[master] = {'minion': minion}
t_minion = Minion(minion, 1, False)
minions[master]['minion'] = t_minion
minions[master]['generator'] = t_minion.tune_in_no_block()
auth_wait = self.opts['acceptance_wait_time']
except SaltClientError:
continue
else:
continue
if module_refresh:
minion['minion'].module_refresh()
if pillar_refresh:
minion['minion'].pillar_refresh()
minion['generator'].next()
class Minion(object):
'''
This class instantiates a minion, runs connections for a minion,
and loads all of the functions into the minion
'''
def __init__(self, opts, timeout=60, safe=True):
'''
Pass in the options dict
'''
        # Late setup of the opts grains, so we can log from the grains
        # module
opts['grains'] = salt.loader.grains(opts)
opts.update(resolve_dns(opts))
self.opts = opts
self.authenticate(timeout, safe)
self.opts['pillar'] = salt.pillar.get_pillar(
opts,
opts['grains'],
opts['id'],
opts['environment'],
).compile_pillar()
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self.__prep_mod_opts()
self.functions, self.returners = self.__load_modules()
self.matcher = Matcher(self.opts, self.functions)
self.proc_dir = get_proc_dir(opts['cachedir'])
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners)
def __prep_mod_opts(self):
'''
Returns a copy of the opts with key bits stripped out
'''
mod_opts = {}
for key, val in self.opts.items():
if key == 'logger':
continue
mod_opts[key] = val
return mod_opts
def __load_modules(self):
'''
Return the functions and the returners loaded up from the loader
module
'''
self.opts['grains'] = salt.loader.grains(self.opts)
functions = salt.loader.minion_mods(self.opts)
returners = salt.loader.returners(self.opts, functions)
return functions, returners
def _fire_master(self, data=None, tag=None, events=None):
'''
Fire an event on the master
'''
load = {'id': self.opts['id'],
'cmd': '_minion_event'}
if events:
load['events'] = events
elif data and tag:
load['data'] = data
load['tag'] = tag
else:
return
sreq = salt.payload.SREQ(self.opts['master_uri'])
try:
sreq.send('aes', self.crypticle.dumps(load))
except Exception:
pass
def _handle_payload(self, payload):
'''
Takes a payload from the master publisher and does whatever the
master wants done.
'''
{'aes': self._handle_aes,
'pub': self._handle_pub,
'clear': self._handle_clear}[payload['enc']](payload['load'])
def _handle_aes(self, load):
'''
Takes the AES encrypted load, decrypts it, and runs the encapsulated
instructions
'''
try:
data = self.crypticle.loads(load)
except AuthenticationError:
# decryption of the payload failed, try to re-auth but wait
# random seconds if set in config with random_reauth_delay
if 'random_reauth_delay' in self.opts:
reauth_delay = randint(0, int(self.opts['random_reauth_delay']) )
log.debug("Waiting {0} seconds to re-authenticate".format(reauth_delay))
time.sleep(reauth_delay)
self.authenticate()
data = self.crypticle.loads(load)
# Verify that the publication is valid
if 'tgt' not in data or 'jid' not in data or 'fun' not in data \
or 'arg' not in data:
return
# Verify that the publication applies to this minion
if 'tgt_type' in data:
match_func = getattr(self.matcher,
'{0}_match'.format(data['tgt_type']), None)
if match_func is None or not match_func(data['tgt']):
return
else:
if not self.matcher.glob_match(data['tgt']):
return
# If the minion does not have the function, don't execute,
# this prevents minions that could not load a minion module
# from returning a predictable exception
#if data['fun'] not in self.functions:
# return
if 'user' in data:
log.info(
'User {0[user]} Executing command {0[fun]} with jid '
'{0[jid]}'.format(data)
)
else:
log.info(
'Executing command {0[fun]} with jid {0[jid]}'.format(data)
)
log.debug('Command details {0}'.format(data))
self._handle_decoded_payload(data)
def _handle_pub(self, load):
'''
Handle public key payloads
'''
pass
def _handle_clear(self, load):
'''
Handle un-encrypted transmissions
'''
pass
def _handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data
differently.
'''
if isinstance(data['fun'], string_types):
if data['fun'] == 'sys.reload_modules':
self.functions, self.returners = self.__load_modules()
self.schedule.functions = self.functions
self.schedule.returners = self.returners
if isinstance(data['fun'], tuple) or isinstance(data['fun'], list):
target = Minion._thread_multi_return
else:
target = Minion._thread_return
# We stash an instance references to allow for the socket
# communication in Windows. You can't pickle functions, and thus
# python needs to be able to reconstruct the reference on the other
# side.
instance = self
if self.opts['multiprocessing']:
if sys.platform.startswith('win'):
# let python reconstruct the minion on the other side if we're
# running on windows
instance = None
process = multiprocessing.Process(
target=target, args=(instance, self.opts, data)
)
else:
process = threading.Thread(
target=target, args=(instance, self.opts, data)
)
process.start()
process.join()
@classmethod
def _thread_return(cls, minion_instance, opts, data):
'''
        This method should be used as a threading target; it starts the actual
        minion-side execution.
'''
# this seems awkward at first, but it's a workaround for Windows
# multiprocessing communication.
if not minion_instance:
minion_instance = cls(opts)
if opts['multiprocessing']:
fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
salt.utils.daemonize_if(opts)
sdata = {'pid': os.getpid()}
sdata.update(data)
with salt.utils.fopen(fn_, 'w+') as fp_:
fp_.write(minion_instance.serial.dumps(sdata))
ret = {}
function_name = data['fun']
if function_name in minion_instance.functions:
ret['success'] = False
try:
func = minion_instance.functions[data['fun']]
args, kwargs = parse_args_and_kwargs(func, data['arg'], data)
sys.modules[func.__module__].__context__['retcode'] = 0
ret['return'] = func(*args, **kwargs)
ret['retcode'] = sys.modules[func.__module__].__context__.get(
'retcode',
0
)
ret['success'] = True
except CommandNotFoundError as exc:
msg = 'Command required for \'{0}\' not found: {1}'
log.debug(msg.format(function_name, str(exc)))
ret['return'] = msg.format(function_name, str(exc))
except CommandExecutionError as exc:
msg = 'A command in {0} had a problem: {1}'
log.error(msg.format(function_name, str(exc)))
ret['return'] = 'ERROR: {0}'.format(str(exc))
except SaltInvocationError as exc:
msg = 'Problem executing "{0}": {1}'
log.error(msg.format(function_name, str(exc)))
ret['return'] = 'ERROR executing {0}: {1}'.format(
function_name, exc
)
except TypeError as exc:
trb = traceback.format_exc()
aspec = _getargs(minion_instance.functions[data['fun']])
msg = ('TypeError encountered executing {0}: {1}. See '
'debug log for more info. Possibly a missing '
'arguments issue: {2}').format(function_name, exc,
aspec)
log.warning(msg)
log.debug(
'TypeError intercepted: {0}\n{1}'.format(exc, trb),
exc_info=True
)
ret['return'] = msg
except Exception:
trb = traceback.format_exc()
msg = 'The minion function caused an exception: {0}'
log.warning(msg.format(trb))
ret['return'] = trb
else:
ret['return'] = '"{0}" is not available.'.format(function_name)
ret['jid'] = data['jid']
ret['fun'] = data['fun']
minion_instance._return_pub(ret)
if data['ret']:
for returner in set(data['ret'].split(',')):
ret['id'] = opts['id']
try:
minion_instance.returners['{0}.returner'.format(
returner
)](ret)
except Exception as exc:
log.error(
'The return failed for job {0} {1}'.format(
data['jid'],
exc
)
)
@classmethod
def _thread_multi_return(cls, minion_instance, opts, data):
'''
        This method should be used as a threading target; it starts the actual
        minion-side execution.
'''
# this seems awkward at first, but it's a workaround for Windows
# multiprocessing communication.
if not minion_instance:
minion_instance = cls(opts)
ret = {
'return': {},
'success': {},
}
for ind in range(0, len(data['fun'])):
ret['success'][data['fun'][ind]] = False
try:
func = minion_instance.functions[data['fun'][ind]]
args, kwargs = parse_args_and_kwargs(func, data['arg'][ind], data)
ret['return'][data['fun'][ind]] = func(*args, **kwargs)
ret['success'][data['fun'][ind]] = True
except Exception as exc:
trb = traceback.format_exc()
log.warning(
'The minion function caused an exception: {0}'.format(
exc
)
)
ret['return'][data['fun'][ind]] = trb
ret['jid'] = data['jid']
minion_instance._return_pub(ret)
if data['ret']:
for returner in set(data['ret'].split(',')):
ret['id'] = opts['id']
try:
minion_instance.returners['{0}.returner'.format(
returner
)](ret)
except Exception as exc:
log.error(
'The return failed for job {0} {1}'.format(
data['jid'],
exc
)
)
def _return_pub(self, ret, ret_cmd='_return'):
'''
Return the data from the executed command to the master server
'''
jid = ret.get('jid', ret.get('__jid__'))
fun = ret.get('fun', ret.get('__fun__'))
if self.opts['multiprocessing']:
fn_ = os.path.join(self.proc_dir, jid)
if os.path.isfile(fn_):
try:
os.remove(fn_)
except (OSError, IOError):
# The file is gone already
pass
log.info('Returning information for job: {0}'.format(jid))
sreq = salt.payload.SREQ(self.opts['master_uri'])
if ret_cmd == '_syndic_return':
load = {'cmd': ret_cmd,
'id': self.opts['id'],
'jid': jid,
'fun': fun,
'load': ret.get('__load__')}
load['return'] = {}
for key, value in ret.items():
if key.startswith('__'):
continue
load['return'][key] = value
else:
load = {'cmd': ret_cmd,
'id': self.opts['id']}
for key, value in ret.items():
load[key] = value
try:
if hasattr(self.functions[ret['fun']], '__outputter__'):
oput = self.functions[ret['fun']].__outputter__
if isinstance(oput, string_types):
load['out'] = oput
except KeyError:
pass
try:
ret_val = sreq.send('aes', self.crypticle.dumps(load))
except SaltReqTimeoutError:
ret_val = ''
if isinstance(ret_val, string_types) and not ret_val:
# The master AES key has changed, reauth
self.authenticate()
ret_val = sreq.send('aes', self.crypticle.dumps(load))
if self.opts['cache_jobs']:
# Local job cache has been enabled
fn_ = os.path.join(
self.opts['cachedir'],
'minion_jobs',
load['jid'],
'return.p')
jdir = os.path.dirname(fn_)
if not os.path.isdir(jdir):
os.makedirs(jdir)
salt.utils.fopen(fn_, 'w+').write(self.serial.dumps(ret))
return ret_val
def _state_run(self):
'''
Execute a state run based on information set in the minion config file
'''
if self.opts['startup_states']:
data = {'jid': 'req', 'ret': ''}
if self.opts['startup_states'] == 'sls':
data['fun'] = 'state.sls'
data['arg'] = [self.opts['sls_list']]
elif self.opts['startup_states'] == 'top':
data['fun'] = 'state.top'
data['arg'] = [self.opts['top_file']]
else:
data['fun'] = 'state.highstate'
data['arg'] = []
self._handle_decoded_payload(data)
@property
def master_pub(self):
'''
Return the master publish port
'''
return 'tcp://{ip}:{port}'.format(ip=self.opts['master_ip'],
port=self.publish_port)
def authenticate(self, timeout=60, safe=True):
'''
        Authenticate with the master. This method breaks the functional
        paradigm because it updates the master information from a fresh sign
        in; signing in can occur as often as needed to keep up with the
        revolving master AES key.
'''
log.debug(
'Attempting to authenticate with the Salt Master at {0}'.format(
self.opts['master_ip']
)
)
auth = salt.crypt.Auth(self.opts)
while True:
creds = auth.sign_in(timeout, safe)
if creds != 'retry':
log.info('Authentication with master successful!')
break
log.info('Waiting for minion key to be accepted by the master.')
time.sleep(self.opts['acceptance_wait_time'])
self.aes = creds['aes']
self.publish_port = creds['publish_port']
self.crypticle = salt.crypt.Crypticle(self.opts, self.aes)
def module_refresh(self):
'''
Refresh the functions and returners.
'''
self.functions, self.returners = self.__load_modules()
self.schedule.functions = self.functions
self.schedule.returners = self.returners
def pillar_refresh(self):
'''
Refresh the pillar
'''
self.opts['pillar'] = salt.pillar.get_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
).compile_pillar()
self.module_refresh()
def clean_die(self, signum, frame):
'''
        Python does not handle SIGTERM cleanly on its own; if the signal is
        received, exit the minion process cleanly
'''
exit(0)
# Main Minion Tune In
def tune_in(self):
'''
Lock onto the publisher. This is the main event loop for the minion
'''
try:
log.info(
'{0} is starting as user \'{1}\''.format(
self.__class__.__name__,
getpass.getuser()
)
)
except Exception as err:
# Only windows is allowed to fail here. See #3189. Log as debug in
# that case. Else, error.
log.log(
salt.utils.is_windows() and logging.DEBUG or logging.ERROR,
'Failed to get the user who is starting {0}'.format(
self.__class__.__name__
),
exc_info=err
)
signal.signal(signal.SIGTERM, self.clean_die)
log.debug('Minion "{0}" trying to tune in'.format(self.opts['id']))
self.context = zmq.Context()
# Prepare the minion event system
#
# Start with the publish socket
id_hash = hashlib.md5(self.opts['id']).hexdigest()
epub_sock_path = os.path.join(
self.opts['sock_dir'],
'minion_event_{0}_pub.ipc'.format(id_hash)
)
epull_sock_path = os.path.join(
self.opts['sock_dir'],
'minion_event_{0}_pull.ipc'.format(id_hash)
)
self.epub_sock = self.context.socket(zmq.PUB)
if self.opts.get('ipc_mode', '') == 'tcp':
epub_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts['tcp_pub_port']
)
epull_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts['tcp_pull_port']
)
else:
epub_uri = 'ipc://{0}'.format(epub_sock_path)
salt.utils.check_ipc_path_max_len(epub_uri)
epull_uri = 'ipc://{0}'.format(epull_sock_path)
salt.utils.check_ipc_path_max_len(epull_uri)
log.debug(
'{0} PUB socket URI: {1}'.format(
self.__class__.__name__, epub_uri
)
)
log.debug(
'{0} PULL socket URI: {1}'.format(
self.__class__.__name__, epull_uri
)
)
# Create the pull socket
self.epull_sock = self.context.socket(zmq.PULL)
# Bind the event sockets
self.epub_sock.bind(epub_uri)
self.epull_sock.bind(epull_uri)
# Restrict access to the sockets
if self.opts.get('ipc_mode', '') != 'tcp':
os.chmod(
epub_sock_path,
448
)
os.chmod(
epull_sock_path,
448
)
self.poller = zmq.Poller()
self.epoller = zmq.Poller()
self.socket = self.context.socket(zmq.SUB)
self.socket.setsockopt(zmq.SUBSCRIBE, '')
self.socket.setsockopt(zmq.IDENTITY, self.opts['id'])
recon_delay = self.opts['recon_default']
if self.opts['recon_randomize']:
recon_delay = randint(self.opts['recon_default'],
self.opts['recon_default'] + self.opts['recon_max']
)
log.debug("Generated random reconnect delay between '{0}ms' and '{1}ms' ({2})".format(
self.opts['recon_default'],
self.opts['recon_default'] + self.opts['recon_max'],
recon_delay)
)
log.debug("Setting zmq_reconnect_ivl to '{0}ms'".format(recon_delay))
self.socket.setsockopt(zmq.RECONNECT_IVL, recon_delay)
if hasattr(zmq, 'RECONNECT_IVL_MAX'):
log.debug("Setting zmq_reconnect_ivl_max to '{0}ms'".format(
self.opts['recon_default'] + self.opts['recon_max'])
)
self.socket.setsockopt(
zmq.RECONNECT_IVL_MAX, self.opts['recon_max']
)
if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'):
# IPv6 sockets work for both IPv6 and IPv4 addresses
self.socket.setsockopt(zmq.IPV4ONLY, 0)
if hasattr(zmq, 'TCP_KEEPALIVE'):
self.socket.setsockopt(
zmq.TCP_KEEPALIVE, self.opts['tcp_keepalive']
)
self.socket.setsockopt(
zmq.TCP_KEEPALIVE_IDLE, self.opts['tcp_keepalive_idle']
)
self.socket.setsockopt(
zmq.TCP_KEEPALIVE_CNT, self.opts['tcp_keepalive_cnt']
)
self.socket.setsockopt(
zmq.TCP_KEEPALIVE_INTVL, self.opts['tcp_keepalive_intvl']
)
self.socket.connect(self.master_pub)
self.poller.register(self.socket, zmq.POLLIN)
self.epoller.register(self.epull_sock, zmq.POLLIN)
# Send an event to the master that the minion is live
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
'minion_start'
)
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
# Make sure to gracefully handle CTRL_LOGOFF_EVENT
salt.utils.enable_ctrl_logoff_handler()
# On first startup execute a state run if configured to do so
self._state_run()
time.sleep(.5)
loop_interval = int(self.opts['loop_interval'])
while True:
try:
self.schedule.eval()
# Check if scheduler requires lower loop interval than
# the loop_interval setting
if self.schedule.loop_interval < loop_interval:
loop_interval = self.schedule.loop_interval
log.debug(
'Overriding loop_interval because of scheduled jobs.'
)
except Exception as exc:
log.error(
'Exception {0} occurred in scheduled job'.format(exc)
)
try:
socks = dict(self.poller.poll(
loop_interval * 1000)
)
if self.socket in socks and socks[self.socket] == zmq.POLLIN:
payload = self.serial.loads(self.socket.recv())
self._handle_payload(payload)
# Check the event system
if self.epoller.poll(1):
try:
while True:
package = self.epull_sock.recv(zmq.NOBLOCK)
if package.startswith('module_refresh'):
self.module_refresh()
elif package.startswith('pillar_refresh'):
self.pillar_refresh()
self.epub_sock.send(package)
except Exception:
pass
except zmq.ZMQError:
# This is thrown by the interrupt caused by python handling the
# SIGCHLD. This is a safe error and we just start the poll
# again
continue
except Exception:
log.critical(
'An exception occurred while polling the minion',
exc_info=True
)
def tune_in_no_block(self):
'''
Executes the tune_in sequence but omits extra logging and the
management of the event bus assuming that these are handled outside
the tune_in sequence
'''
self.context = zmq.Context()
self.poller = zmq.Poller()
self.socket = self.context.socket(zmq.SUB)
self.socket.setsockopt(zmq.SUBSCRIBE, '')
self.socket.setsockopt(zmq.IDENTITY, self.opts['id'])
if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'):
# IPv6 sockets work for both IPv6 and IPv4 addresses
self.socket.setsockopt(zmq.IPV4ONLY, 0)
if hasattr(zmq, 'RECONNECT_IVL_MAX'):
self.socket.setsockopt(
zmq.RECONNECT_IVL_MAX, self.opts['recon_max']
)
if hasattr(zmq, 'TCP_KEEPALIVE'):
self.socket.setsockopt(
zmq.TCP_KEEPALIVE, self.opts['tcp_keepalive']
)
self.socket.setsockopt(
zmq.TCP_KEEPALIVE_IDLE, self.opts['tcp_keepalive_idle']
)
self.socket.setsockopt(
zmq.TCP_KEEPALIVE_CNT, self.opts['tcp_keepalive_cnt']
)
self.socket.setsockopt(
zmq.TCP_KEEPALIVE_INTVL, self.opts['tcp_keepalive_intvl']
)
self.socket.connect(self.master_pub)
self.poller.register(self.socket, zmq.POLLIN)
# Send an event to the master that the minion is live
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
'minion_start'
)
loop_interval = int(self.opts['loop_interval'])
while True:
try:
socks = dict(self.poller.poll(
loop_interval * 1000)
)
if self.socket in socks and socks[self.socket] == zmq.POLLIN:
payload = self.serial.loads(self.socket.recv())
self._handle_payload(payload)
# Check the event system
except zmq.ZMQError:
# If a zeromq error happens recover
yield True
except Exception:
log.critical(
'An exception occurred while polling the minion',
exc_info=True
)
yield True
def destroy(self):
'''
Tear down the minion
'''
if hasattr(self, 'poller'):
for socket in self.poller.sockets.keys():
if socket.closed is False:
socket.close()
self.poller.unregister(socket)
if hasattr(self, 'epoller'):
for socket in self.epoller.sockets.keys():
if socket.closed is False:
socket.close()
self.epoller.unregister(socket)
if hasattr(self, 'epub_sock') and self.epub_sock.closed is False:
self.epub_sock.close()
if hasattr(self, 'epull_sock') and self.epull_sock.closed is False:
self.epull_sock.close()
if hasattr(self, 'socket') and self.socket.closed is False:
self.socket.close()
if hasattr(self, 'context') and self.context.closed is False:
self.context.term()
def __del__(self):
self.destroy()
class Syndic(Minion):
'''
Make a Syndic minion, this minion will use the minion keys on the
master to authenticate with a higher level master.
'''
def __init__(self, opts):
self._syndic_interface = opts.get('interface')
self._syndic = True
opts['loop_interval'] = 1
Minion.__init__(self, opts)
def _handle_aes(self, load):
'''
Takes the AES encrypted load, decrypts it, and runs the encapsulated
instructions
'''
# If the AES authentication has changed, re-authenticate
try:
data = self.crypticle.loads(load)
except AuthenticationError:
self.authenticate()
data = self.crypticle.loads(load)
# Verify that the publication is valid
if 'tgt' not in data or 'jid' not in data or 'fun' not in data \
or 'to' not in data or 'arg' not in data:
return
data['to'] = int(data['to']) - 1
if 'user' in data:
log.debug(
'User {0[user]} Executing syndic command {0[fun]} with '
'jid {0[jid]}'.format(
data
)
)
else:
log.debug(
'Executing syndic command {0[fun]} with jid {0[jid]}'.format(
data
)
)
log.debug('Command details: {0}'.format(data))
self._handle_decoded_payload(data)
def _handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data
differently.
'''
self.syndic_cmd(data)
def syndic_cmd(self, data):
'''
Take the now clear load and forward it on to the client cmd
'''
# Set up default tgt_type
if 'tgt_type' not in data:
data['tgt_type'] = 'glob'
# Send out the publication
self.local.pub(data['tgt'],
data['fun'],
data['arg'],
data['tgt_type'],
data['ret'],
data['jid'],
data['to'])
# Syndic Tune In
def tune_in(self):
'''
Lock onto the publisher. This is the main event loop for the syndic
'''
# Instantiate the local client
self.local = salt.client.LocalClient(self.opts['_minion_conf_file'])
self.local.event.subscribe('')
self.local.opts['interface'] = self._syndic_interface
signal.signal(signal.SIGTERM, self.clean_die)
log.debug('Syndic "{0}" trying to tune in'.format(self.opts['id']))
self.context = zmq.Context()
# Start with the publish socket
self.poller = zmq.Poller()
self.socket = self.context.socket(zmq.SUB)
self.socket.setsockopt(zmq.SUBSCRIBE, '')
self.socket.setsockopt(zmq.IDENTITY, self.opts['id'])
if hasattr(zmq, 'RECONNECT_IVL_MAX'):
self.socket.setsockopt(
zmq.RECONNECT_IVL_MAX, self.opts['recon_max']
)
if hasattr(zmq, 'TCP_KEEPALIVE'):
self.socket.setsockopt(
zmq.TCP_KEEPALIVE, self.opts['tcp_keepalive']
)
self.socket.setsockopt(
zmq.TCP_KEEPALIVE_IDLE, self.opts['tcp_keepalive_idle']
)
self.socket.setsockopt(
zmq.TCP_KEEPALIVE_CNT, self.opts['tcp_keepalive_cnt']
)
self.socket.setsockopt(
zmq.TCP_KEEPALIVE_INTVL, self.opts['tcp_keepalive_intvl']
)
self.socket.connect(self.master_pub)
self.poller.register(self.socket, zmq.POLLIN)
# Send an event to the master that the minion is live
self._fire_master(
'Syndic {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
'syndic_start'
)
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
loop_interval = int(self.opts['loop_interval'])
while True:
try:
socks = dict(self.poller.poll(
loop_interval * 1000)
)
if self.socket in socks and socks[self.socket] == zmq.POLLIN:
payload = self.serial.loads(self.socket.recv())
self._handle_payload(payload)
time.sleep(0.05)
jids = {}
raw_events = []
while True:
event = self.local.event.get_event(0.5, full=True)
if event is None:
# Timeout reached
break
if salt.utils.is_jid(event['tag']) and 'return' in event['data']:
if not event['tag'] in jids:
if not 'jid' in event['data']:
# Not a job return
continue
jids[event['tag']] = {}
jids[event['tag']]['__fun__'] = event['data'].get('fun')
jids[event['tag']]['__jid__'] = event['data']['jid']
jids[event['tag']]['__load__'] = salt.utils.jid_load(
event['data']['jid'],
self.local.opts['cachedir'],
self.opts['hash_type'])
jids[event['tag']][event['data']['id']] = event['data']['return']
else:
# Add generic event aggregation here
if not 'retcode' in event['data']:
raw_events.append(event)
if raw_events:
self._fire_master(events=raw_events)
for jid in jids:
self._return_pub(jids[jid], '_syndic_return')
except zmq.ZMQError:
# This is thrown by the interrupt caused by python handling the
# SIGCHLD. This is a safe error and we just start the poll
# again
continue
except Exception:
log.critical(
'An exception occurred while polling the syndic',
exc_info=True
)
def destroy(self):
'''
Tear down the syndic minion
'''
super(Syndic, self).destroy()
if hasattr(self, 'local'):
del self.local
class Matcher(object):
'''
    Used to return the value for matching calls from the master
'''
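    # Each *_match method below backs one targeting flavour on the CLI, e.g.
    # ``salt -G 'os:Ubuntu' test.ping`` ends up in grain_match and
    # ``salt -E 'web\d+' test.ping`` in pcre_match (examples are illustrative).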
def __init__(self, opts, functions=None):
self.opts = opts
if functions is None:
functions = salt.loader.minion_mods(self.opts)
self.functions = functions
def confirm_top(self, match, data, nodegroups=None):
'''
Takes the data passed to a top file environment and determines if the
data matches this minion
'''
matcher = 'glob'
if not data:
log.error('Received bad data when setting the match from the top '
'file')
return False
for item in data:
if isinstance(item, dict):
if 'match' in item:
matcher = item['match']
if hasattr(self, matcher + '_match'):
funcname = '{0}_match'.format(matcher)
if matcher == 'nodegroup':
return getattr(self, funcname)(match, nodegroups)
return getattr(self, funcname)(match)
else:
log.error('Attempting to match with unknown matcher: {0}'.format(
matcher
))
return False
def glob_match(self, tgt):
'''
Returns true if the passed glob matches the id
'''
return fnmatch.fnmatch(self.opts['id'], tgt)
def pcre_match(self, tgt):
'''
Returns true if the passed pcre regex matches
'''
return bool(re.match(tgt, self.opts['id']))
def list_match(self, tgt):
'''
Determines if this host is on the list
'''
if isinstance(tgt, string_types):
tgt = tgt.split(',')
return bool(self.opts['id'] in tgt)
def grain_match(self, tgt):
'''
Reads in the grains glob match
'''
log.debug('grains target: {0}'.format(tgt))
if ':' not in tgt:
log.error('Got insufficient arguments for grains match '
'statement from master')
return False
return salt.utils.subdict_match(self.opts['grains'], tgt, delim=':')
def grain_pcre_match(self, tgt):
'''
Matches a grain based on regex
'''
log.debug('grains pcre target: {0}'.format(tgt))
if ':' not in tgt:
log.error('Got insufficient arguments for grains pcre match '
'statement from master')
return False
return salt.utils.subdict_match(self.opts['grains'], tgt,
delim=':', regex_match=True)
def data_match(self, tgt):
'''
Match based on the local data store on the minion
'''
comps = tgt.split(':')
if len(comps) < 2:
return False
val = self.functions['data.getval'](comps[0])
if val is None:
# The value is not defined
return False
if isinstance(val, list):
# We are matching a single component to a single list member
for member in val:
if fnmatch.fnmatch(str(member).lower(), comps[1].lower()):
return True
return False
if isinstance(val, dict):
if comps[1] in val:
return True
return False
return bool(fnmatch.fnmatch(
val,
comps[1],
))
def exsel_match(self, tgt):
'''
        Runs a registered function and returns its result
'''
if tgt not in self.functions:
return False
        return self.functions[tgt]()
def pillar_match(self, tgt):
'''
Reads in the pillar glob match
'''
log.debug('pillar target: {0}'.format(tgt))
if ':' not in tgt:
log.error('Got insufficient arguments for pillar match '
'statement from master')
return False
return salt.utils.subdict_match(self.opts['pillar'], tgt, delim=':')
def ipcidr_match(self, tgt):
'''
Matches based on ip address or CIDR notation
'''
num_parts = len(tgt.split('/'))
if num_parts > 2:
# Target is not valid CIDR
return False
elif num_parts == 2:
# Target is CIDR
return salt.utils.network.in_subnet(
tgt,
addrs=self.opts['grains'].get('ipv4', [])
)
else:
# Target is an IPv4 address
import socket
try:
socket.inet_aton(tgt)
except socket.error:
# Not a valid IPv4 address
return False
else:
return tgt in self.opts['grains'].get('ipv4', [])
def range_match(self, tgt):
'''
Matches based on range cluster
'''
if HAS_RANGE:
range_ = seco.range.Range(self.opts['range_server'])
try:
return self.opts['grains']['fqdn'] in range_.expand(tgt)
except seco.range.RangeException as e:
log.debug('Range exception in compound match: {0}'.format(e))
return False
        return False
def compound_match(self, tgt):
'''
Runs the compound target check
'''
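        # Example compound target (prefixes map through ``ref`` below; the
        # target itself is purely illustrative):
        #   'G@os:Ubuntu and web* or E@db[0-9]+'
        # i.e. grain os:Ubuntu AND hostname glob web* OR pcre db[0-9]+.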
if not isinstance(tgt, string_types):
log.debug('Compound target received that is not a string')
return False
ref = {'G': 'grain',
'P': 'grain_pcre',
'X': 'exsel',
'I': 'pillar',
'L': 'list',
'S': 'ipcidr',
'E': 'pcre',
'D': 'data'}
if HAS_RANGE:
ref['R'] = 'range'
results = []
opers = ['and', 'or', 'not', '(', ')']
tokens = tgt.split()
for match in tokens:
            # Try to match tokens from the compound target, first by using
            # the matcher prefixes defined in ``ref`` above, then by hostname glob.
if '@' in match and match[1] == '@':
comps = match.split('@')
matcher = ref.get(comps[0])
if not matcher:
# If an unknown matcher is called at any time, fail out
return False
results.append(
str(
getattr(self, '{0}_match'.format(matcher))(
'@'.join(comps[1:])
)
)
)
elif match in opers:
# We didn't match a target, so append a boolean operator or
# subexpression
if results:
if match == 'not':
if results[-1] == 'and':
pass
elif results[-1] == 'or':
pass
else:
results.append('and')
results.append(match)
else:
# seq start with oper, fail
if match not in ['(', ')']:
return False
else:
# The match is not explicitly defined, evaluate it as a glob
results.append(str(self.glob_match(match)))
results = ' '.join(results)
try:
return eval(results)
except Exception:
log.error('Invalid compound target: {0}'.format(tgt))
return False
return False
def nodegroup_match(self, tgt, nodegroups):
'''
This is a compatibility matcher and is NOT called when using
nodegroups for remote execution, but is called when the nodegroups
matcher is used in states
'''
if tgt in nodegroups:
return self.compound_match(
salt.utils.minions.nodegroup_comp(tgt, nodegroups)
)
return False
|
daemon.py
|
import logging
import os
import signal
import socketserver
import sys
import threading
from concurrent import futures
from logging.handlers import RotatingFileHandler
from pathlib import Path
from queue import Queue
from typing import TYPE_CHECKING, Any, NoReturn, Optional, Set, cast
import requests
from bs4 import BeautifulSoup as BS
from daemon import DaemonContext
from requests import exceptions as exs
from typing_extensions import Final
import proofaday.constants as consts
from proofaday.message import Action, Message
from proofaday.proof import InvalidProofException, Proof
from proofaday.status import Status
if TYPE_CHECKING:
# pylint: disable=unsubscriptable-object
StrQueue = Queue[str]
ProofFuture = futures.Future[Optional[str]]
else:
StrQueue = Queue
ProofFuture = futures.Future
class ServerError(Exception):
pass
class ProofHandler(socketserver.BaseRequestHandler):
def handle(self) -> None:
data, sock = self.request
msg = Message.decode(data)
server: ProofServer = cast(ProofServer, self.server)
logger = server.logger
logger.info("Received %s from (%s, %d)", msg.action, *self.client_address)
if msg.action is Action.REQUEST:
logger.info("Fetching %s", msg.data)
proof = server.fetch_proof(msg.data)
reply = proof if proof is not None else ""
elif msg.action is Action.RANDOM:
logger.info("Dequeuing proof")
reply = server.queue.get()
sock.sendto(reply.encode(), self.client_address)
class ProofServer(socketserver.ThreadingUDPServer):
daemon_threads = True
proof_timeout: Final = 1
max_log_bytes: Final = 1024 * 1024
max_threads: Final = 5
def __init__(
self,
port: int,
line_limit: int,
nprefetch: int,
debug: int,
log_path: Path,
status: Status,
) -> None:
self.status = status
if not self.status.touch():
raise ServerError("Status file already exists or couldn't be created.")
super().__init__((consts.HOST, port), ProofHandler)
level = {0: logging.NOTSET, 1: logging.INFO}.get(debug, logging.DEBUG)
self.logger = self.init_logger(level, log_path)
self.queue: StrQueue = Queue(maxsize=nprefetch)
self.limit = line_limit if line_limit > 0 else None
host, port = self.server_address
if not self.status.write(pid=os.getpid(), host=host, port=port):
self.status.remove()
raise ServerError("Failed to write status file.")
threading.Thread(
target=self.fetch_proofs,
daemon=True,
name="ServerLoop",
).start()
@staticmethod
def init_logger(level: int, path: Path) -> logging.Logger:
logger = logging.getLogger(__name__)
logger.setLevel(level)
if level != logging.NOTSET:
path.mkdir(parents=True, exist_ok=True)
handler: logging.Handler = RotatingFileHandler(
path / consts.LOG_FILE,
maxBytes=ProofServer.max_log_bytes,
backupCount=1,
encoding="utf8",
)
else:
handler = logging.NullHandler()
handler.setFormatter(logging.Formatter("%(threadName)s: %(message)s"))
logger.addHandler(handler)
return logger
def server_close(self) -> None:
super().server_close()
status = self.status.read()
if status is not None and status["pid"] == os.getpid():
self.status.remove()
def fetch_proof(self, name: str = consts.RANDOM) -> Optional[str]:
url = consts.URL + name
try:
data = requests.get(url, timeout=ProofServer.proof_timeout).text
html = BS(data, "html.parser")
proof = Proof(html)
self.logger.debug(repr(proof))
return str(proof)
except (ConnectionResetError, exs.Timeout):
pass
except InvalidProofException as e:
self.logger.exception("Invalid proof: %s", str(e))
except Exception as e: # pylint: disable=broad-except
self.logger.exception(
"Unexpected exception while fetching a proof: %s",
str(e),
)
return None
def enqueue_proof(self, proof: str) -> None:
if self.limit is None or len(proof.split("\n")) <= self.limit:
self.queue.put(proof)
def fetch_proofs(self) -> NoReturn:
with futures.ThreadPoolExecutor(
max_workers=ProofServer.max_threads,
thread_name_prefix="Fetcher",
) as pool:
jobs: Set[ProofFuture] = set()
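            # Keep enough fetch jobs in flight to top the queue back up, and
            # always keep at least one outstanding so futures.wait() below has
            # something to block on.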
while True:
njobs = self.queue.maxsize - self.queue.qsize() - len(jobs)
if len(jobs) == 0:
njobs = max(njobs, 1)
jobs |= {pool.submit(self.fetch_proof) for _ in range(njobs)}
done, jobs = futures.wait(jobs, return_when=futures.FIRST_COMPLETED)
for job in done:
proof = job.result()
if proof is not None:
self.enqueue_proof(proof)
def spawn(**kwargs: Any) -> None:
with DaemonContext(stdout=sys.stdout, stderr=sys.stderr):
# N.B. shutdown() must be called in a separate thread
signal.signal(
signal.SIGTERM,
lambda signum, frame: threading.Thread(target=server.shutdown).start(),
)
try:
with ProofServer(**kwargs) as server:
server.serve_forever()
except ServerError as e:
sys.exit(str(e))
|
start_mission.py
|
#!/usr/bin/env python
# Python script to start a crazyflie mission.
#
# The script expects the name of the file as a parameter. It is also possible
# to specify the frequency of the thread publishing the position of the
# 'ghost', that is the simulation of the expected trajectory.
#
# Precisely the script will:
# Load a trajectory from file
# Upload the trajectory on the crazyflie
# Ask the crazyflie to takeoff
# Send the command to start the mission(trajectory)
# Start a thread that simulates the trajectory execution
# Ask the crazyflie to land after the end of the mission
#
#
import numpy as np
import rospy
import crazyflie
import time
import uav_trajectory
from threading import Thread
from crazyflie_demo.msg import Trajectory
from tf.transformations import euler_from_matrix
# Trajectory Publisher
ghost_pub = rospy.Publisher('ghost_trajectory', Trajectory, queue_size=10)
def rep_trajectory(trajectory, start_position, freq):
    timeSpan = trajectory.duration
    r = rospy.Rate(freq)
    print("Running at freq. = ", freq)
start_time = rospy.get_time()
curr_time = start_time
print("Current time: ", curr_time)
print("Start time: ", start_time)
print("Expected end time: ", start_time + timeSpan)
end_time = start_time + timeSpan
msg = Trajectory()
# Publishing Loop
while (curr_time < end_time):
# Evaluate the trajectory
rep_trj = trajectory.eval(curr_time - start_time)
msg.px = rep_trj.pos[0]
msg.py = rep_trj.pos[1]
msg.pz = rep_trj.pos[2]
msg.vx = rep_trj.vel[0]
msg.vy = rep_trj.vel[1]
msg.vz = rep_trj.vel[2]
msg.accx = rep_trj.acc[0]
msg.accy = rep_trj.acc[1]
msg.accz = rep_trj.acc[2]
        # Convert the rotation matrix to Euler angles
R = rep_trj.R
(roll, pitch, yaw) = euler_from_matrix(R)
msg.r = roll * 180 / np.pi
msg.p = pitch * 180 / np.pi
msg.y = yaw * 180 / np.pi
        # Publish the evaluated trajectory
        ghost_pub.publish(msg)
        # Wait for the next loop iteration
r.sleep()
# Take the time
curr_time = rospy.get_time()
if __name__ == '__main__':
rospy.init_node('Node_commander')
rospy.loginfo("Starting Node Commander")
cf = crazyflie.Crazyflie("cf1", "/tf")
file_name = rospy.search_param('trajectory_file')
if (file_name):
trj_file = rospy.get_param(file_name)
print("Trajectory file found! ", trj_file)
else:
        rospy.signal_shutdown("Trajectory file not found!")
    frequency = rospy.get_param('freq_ghost', 30.0)
rospy.loginfo("Uploading Trajectory...")
traj = uav_trajectory.Trajectory()
traj.loadcsv(trj_file)
cf.uploadTrajectory(0, 0, traj)
rospy.loginfo("Trajectory duration: " + str(traj.duration))
time.sleep(3)
cf.takeoff(targetHeight = 0.6, duration = 3.0)
time.sleep(5.0)
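    # Note: several commands below (goTo, startTrajectory, land) are issued
    # twice; this appears deliberate, presumably to guard against dropped
    # radio packets.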
cf.goTo(goal = [-1.5, 0, 1.10], yaw=0.0, duration = 2.0, relative = False)
cf.goTo(goal = [-1.5, 0, 1.10], yaw=0.0, duration = 2.0, relative = False)
time.sleep(4.0)
rospy.loginfo("Starting Trajectory")
cf.startTrajectory(0, timescale=1.0)
cf.startTrajectory(0, timescale=1.0)
    t = Thread(target=rep_trajectory, args=(traj, [-1.5, 0, 1.10], frequency))
    t.start()
time.sleep(traj.duration / 1.5)
cf.stop()
rospy.loginfo("Landing")
cf.land(targetHeight = 0.05, duration = 2.0)
time.sleep(0.1)
cf.land(targetHeight = 0.05, duration = 2.0)
time.sleep(2)
|
test_docxmlrpc.py
|
from DocXMLRPCServer import DocXMLRPCServer
import httplib
import sys
from test import test_support
threading = test_support.import_module('threading')
import time
import socket
import unittest
PORT = None
def make_request_and_skipIf(condition, reason):
    # If we skip the test, we still have to make a request because the
    # server created in setUp blocks expecting one to come in.
if not condition:
return lambda func: func
def decorator(func):
def make_request_and_skip(self):
self.client.request("GET", "/")
self.client.getresponse()
raise unittest.SkipTest(reason)
return make_request_and_skip
return decorator
def server(evt, numrequests):
serv = DocXMLRPCServer(("localhost", 0), logRequests=False)
try:
global PORT
PORT = serv.socket.getsockname()[1]
# Add some documentation
serv.set_server_title("DocXMLRPCServer Test Documentation")
serv.set_server_name("DocXMLRPCServer Test Docs")
serv.set_server_documentation(
"This is an XML-RPC server's documentation, but the server "
"can be used by POSTing to /RPC2. Try self.add, too.")
# Create and register classes and functions
class TestClass(object):
def test_method(self, arg):
"""Test method's docs. This method truly does very little."""
self.arg = arg
serv.register_introspection_functions()
serv.register_instance(TestClass())
def add(x, y):
"""Add two instances together. This follows PEP008, but has nothing
to do with RFC1952. Case should matter: pEp008 and rFC1952. Things
that start with http and ftp should be auto-linked, too:
http://google.com.
"""
return x + y
serv.register_function(add)
serv.register_function(lambda x, y: x-y)
while numrequests > 0:
serv.handle_request()
numrequests -= 1
except socket.timeout:
pass
finally:
serv.server_close()
PORT = None
evt.set()
class DocXMLRPCHTTPGETServer(unittest.TestCase):
def setUp(self):
self._threads = test_support.threading_setup()
# Enable server feedback
DocXMLRPCServer._send_traceback_header = True
self.evt = threading.Event()
threading.Thread(target=server, args=(self.evt, 1)).start()
# wait for port to be assigned
n = 1000
while n > 0 and PORT is None:
time.sleep(0.001)
n -= 1
self.client = httplib.HTTPConnection("localhost:%d" % PORT)
def tearDown(self):
self.client.close()
self.evt.wait()
# Disable server feedback
DocXMLRPCServer._send_traceback_header = False
test_support.threading_cleanup(*self._threads)
def test_valid_get_response(self):
self.client.request("GET", "/")
response = self.client.getresponse()
self.assertEqual(response.status, 200)
self.assertEqual(response.getheader("Content-type"), "text/html")
# Server throws an exception if we don't start to read the data
response.read()
def test_invalid_get_response(self):
self.client.request("GET", "/spam")
response = self.client.getresponse()
self.assertEqual(response.status, 404)
self.assertEqual(response.getheader("Content-type"), "text/plain")
response.read()
def test_lambda(self):
"""Test that lambda functionality stays the same. The output produced
        currently is, I suspect, invalid because of the unencoded brackets in the
HTML, "<lambda>".
The subtraction lambda method is tested.
"""
self.client.request("GET", "/")
response = self.client.getresponse()
self.assertIn('<dl><dt><a name="-<lambda>"><strong>'
'<lambda></strong></a>(x, y)</dt></dl>',
response.read())
@make_request_and_skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_autolinking(self):
"""Test that the server correctly automatically wraps references to
PEPS and RFCs with links, and that it linkifies text starting with
http or ftp protocol prefixes.
The documentation for the "add" method contains the test material.
"""
self.client.request("GET", "/")
response = self.client.getresponse()
self.assertIn(
('<dl><dt><a name="-add"><strong>add</strong></a>(x, y)</dt><dd>'
'<tt>Add two instances together. This '
'follows <a href="http://www.python.org/dev/peps/pep-0008/">'
'PEP008</a>, but has nothing<br>\nto do '
'with <a href="http://www.rfc-editor.org/rfc/rfc1952.txt">'
'RFC1952</a>. Case should matter: pEp008 '
'and rFC1952. Things<br>\nthat start '
'with http and ftp should be '
'auto-linked, too:<br>\n<a href="http://google.com">'
'http://google.com</a>.</tt></dd></dl>'), response.read())
@make_request_and_skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
    def test_system_methods(self):
        """Test the presence of three consecutive system.* methods.
This also tests their use of parameter type recognition and the
systems related to that process.
"""
self.client.request("GET", "/")
response = self.client.getresponse()
self.assertIn(
('<dl><dt><a name="-system.listMethods"><strong>system.listMethods'
'</strong></a>()</dt><dd><tt><a href="#-system.listMethods">system'
'.listMethods</a>() => [\'add\', \'subtract\','
' \'multiple\']<br>\n <br>\nReturns a list'
' of the methods supported by the'
' server.</tt></dd></dl>\n <dl><dt><a name="-system.methodHelp">'
'<strong>system.methodHelp</strong></a>(method_name)</dt><dd><tt>'
'<a href="#-system.methodHelp">system.methodHelp</a>(\'add\') '
'=> "Adds two integers together"<br>\n '
'<br>\nReturns a string containing documentation'
' for the specified method.</tt></dd></dl>\n '
'<dl><dt><a name="-system.methodSignature"><strong>system.'
'methodSignature</strong></a>(method_name)</dt><dd><tt><a href="#-'
'system.methodSignature">system.methodSignature</a>(\'add\') '
'=> [double, int, int]<br>\n <br>\nReturns'
' a list describing the signature of'
' the method. In the<br>\nabove example,'
' the add method takes two integers'
' as arguments<br>\nand returns a double'
' result.<br>\n <br>\nThis server does '
'NOT support system.methodSignature.</tt></dd></dl>'),
response.read())
def test_autolink_dotted_methods(self):
"""Test that selfdot values are made strong automatically in the
documentation."""
self.client.request("GET", "/")
response = self.client.getresponse()
self.assertIn("""Try self.<strong>add</strong>, too.""",
response.read())
def test_main():
test_support.run_unittest(DocXMLRPCHTTPGETServer)
if __name__ == '__main__':
test_main()
|
app.py
|
# -*- coding: utf-8 -*-
import asyncio, sys, os, json, threading, time
from os.path import abspath, dirname, join
from threading import Timer
from paramiko import SSHClient
from sshtunnel import SSHTunnelForwarder
from .AboutDialog import Ui_AboutDialog
from .MainWindow import Ui_MainWindow
from .ConnectDialog import Ui_ConnectDialog
from .EditConnectionDialog import Ui_EditConnectionDialog
from .WebBrowser import Ui_WebBrowser
import PyQt5
from PyQt5.QtWidgets import \
QApplication, QMainWindow, QSystemTrayIcon, QMenu, \
QAction, QWidget, QStyle, QDialog, QMessageBox, \
QListWidgetItem, QToolBar, QLineEdit
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import QSize, QUrl
from PyQt5.QtWebEngineWidgets import QWebEngineView
main_window = None
app_debug = False
def set_window_center(window):
desktop = QApplication.desktop()
screen_number = desktop.screenNumber(desktop.cursor().pos())
center = desktop.screenGeometry(screen_number).center()
window_size = window.size()
    width = window_size.width()
    height = window_size.height()
    x = center.x() - round(width / 2)
    y = center.y() - round(height / 2)
    window.move(x, y)
class Connection():
def __init__(self):
        self.connection_name = ""
        self.host = ""
        self.port = ""
        self.username = ""
        self.password = ""
class AboutDialog(QDialog, Ui_AboutDialog):
def __init__(self):
QDialog.__init__(self)
self.setupUi(self)
self.setFixedSize(self.size())
class ConnectDialog(QDialog, Ui_ConnectDialog):
def __init__(self):
QDialog.__init__(self)
self.setupUi(self)
self.label.setWordWrap(True)
self.setFixedSize(self.size())
class EditConnectionDialog(QDialog, Ui_EditConnectionDialog):
def __init__(self):
QDialog.__init__(self)
self.setupUi(self)
self.setWindowTitle("Edit connection")
self.setFixedSize(self.size())
class WebBrowser(QMainWindow, Ui_WebBrowser):
def __init__(self, parent=None):
QMainWindow.__init__(self, parent)
self.home_url = ""
self.connect_data = None
self.connect_dialog = None
self.ssh_server = None
self.thread_connect = None
self.is_connected = False
self.setupUi(self)
self.setWindowTitle("Connect to server")
self.setCentralWidget(self.webBrowser)
# Tool Bar
self.toolBar = QToolBar()
self.addToolBar(self.toolBar)
# Buttons
self.prevButton = QAction('Prev', self)
self.nextButton = QAction('Next', self)
self.refreshButton = QAction('Refresh', self)
self.homeButton = QAction('Home', self)
self.urlEdit = QLineEdit()
# Add to toolbar
self.toolBar.addAction(self.prevButton)
self.toolBar.addAction(self.nextButton)
self.toolBar.addAction(self.refreshButton)
#self.toolBar.addAction(self.homeButton)
self.toolBar.addWidget(self.urlEdit)
# Events
self.prevButton.triggered.connect(self.onPrevButtonClick)
self.nextButton.triggered.connect(self.onNextButtonClick)
self.refreshButton.triggered.connect(self.onRefreshButtonClick)
self.homeButton.triggered.connect(self.onHomeButtonClick)
self.urlEdit.returnPressed.connect(self.onUrlEditChange)
self.webBrowser.urlChanged.connect(self.onWebBrowserUrlChange)
def closeEvent(self, event):
self.sshDisconnect()
event.accept()
def sshConnect(self):
try:
time.sleep(1)
            if self.connect_data is None:
s = "Error: Connection data is None"
self.connect_dialog.label.setText(s)
if app_debug:
print (s)
return
# Connect to ssh server
data:Connection = self.connect_data
try:
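                # Leaving the port out of local_bind_address lets sshtunnel
                # pick a free local port, which is read back below via
                # self.ssh_server.local_bind_port.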
self.ssh_server = SSHTunnelForwarder(
data.host,
                    ssh_port=int(data.port),
ssh_username=data.username,
ssh_password=data.password,
set_keepalive=60*60,
remote_bind_address=('127.0.0.1', 80),
local_bind_address=('127.0.0.1', ),
)
self.ssh_server.start()
self.home_url = "http://127.0.0.1:" + str(self.ssh_server.local_bind_port) + "/"
except Exception as e:
s = "Error: Failed connect to {0}:{1}: {2}".format(data.host, data.port, e)
if self.connect_dialog != None:
self.connect_dialog.label.setText(s)
if app_debug:
print (s)
return
if self.connect_dialog != None:
self.connect_dialog.accept()
if app_debug:
print ("Connected")
self.is_connected = True
pass
except Exception as e:
print (e)
def sshDisconnect(self):
if self.thread_connect != None:
#self.thread_connect.stop()
self.thread_connect = None
if self.ssh_server != None:
self.ssh_server.stop()
self.ssh_server = None
self.is_connected = False
if app_debug:
print ("Disconnect")
def connectToServer(self, data:Connection):
self.home_url = ""
if app_debug:
print ("Connect to " + data.host)
# Create connection dialog
self.connect_dialog = ConnectDialog()
# Setup title
connect_title = "Connect to {0} ({1})".format(data.connection_name, data.host)
self.connect_dialog.setWindowTitle(connect_title)
self.setWindowTitle(connect_title)
# Setup connection
self.connect_data = data
# Connect to server
self.thread_connect = threading.Thread(target=self.sshConnect)
self.thread_connect.start()
# Show connection dialog
result = 0
if self.is_connected == False:
result = self.connect_dialog.exec()
if app_debug:
print ("Result: ", result)
# Cancel connect
if (result == 0):
self.sshDisconnect()
# Success connect
if self.is_connected:
if self.home_url != "":
webBrowser:QWebEngineView = self.webBrowser
webBrowser.setUrl( QUrl(self.home_url) )
connect_title = "Connected to {0} ({1})".format(data.connection_name, data.host)
self.setWindowTitle(connect_title)
self.show()
set_window_center(self)
self.connect_dialog = None
pass
def onPrevButtonClick(self):
webBrowser:QWebEngineView = self.webBrowser
webBrowser.back()
def onNextButtonClick(self):
webBrowser:QWebEngineView = self.webBrowser
webBrowser.forward()
def onRefreshButtonClick(self):
webBrowser:QWebEngineView = self.webBrowser
webBrowser.reload()
def onHomeButtonClick(self):
webBrowser:QWebEngineView = self.webBrowser
webBrowser.setUrl( QUrl(self.home_url) )
def onUrlEditChange(self):
url = self.urlEdit.text()
webBrowser:QWebEngineView = self.webBrowser
webBrowser.setUrl( QUrl(url) )
def onWebBrowserUrlChange(self, url):
self.urlEdit.setText(url.toString())
pass
class MainWindow(QMainWindow, Ui_MainWindow):
def __init__(self):
QMainWindow.__init__(self)
# Set a title
self.setupUi(self)
self.setWindowTitle("BAYRELL OS Desktop Client")
self.listWidget.setSortingEnabled(True)
# Set to center
self.setFixedSize(self.size())
set_window_center(self)
# Load items
self.loadItems()
# Button action
self.aboutButton.clicked.connect(self.onAboutClick)
self.addButton.clicked.connect(self.onAddClick)
self.editButton.clicked.connect(self.onEditClick)
self.deleteButton.clicked.connect(self.onDeleteClick)
self.connectButton.clicked.connect(self.onConnectClick)
#self.exitButton.clicked.connect(self.onExitClick)
pass
def show_edit_connection_dialog(self, item:QListWidgetItem = None):
dlg = EditConnectionDialog()
if item != None:
data = item.data(1)
dlg.connectionNameEdit.setText( data.connection_name )
dlg.hostEdit.setText( data.host )
dlg.portEdit.setText( data.port )
dlg.usernameEdit.setText( data.username )
dlg.passwordEdit.setText( data.password )
result = dlg.exec()
if result == 1:
# Create data
data = Connection()
data.connection_name = dlg.connectionNameEdit.text()
data.host = dlg.hostEdit.text()
data.port = dlg.portEdit.text()
data.username = dlg.usernameEdit.text()
data.password = dlg.passwordEdit.text()
# Add data to list widget
if item == None:
item = QListWidgetItem(data.connection_name)
item.setData(1, data)
self.listWidget.addItem(item)
else:
item.setText(data.connection_name)
item.setData(1, data)
def getSettingsFileName(self):
path = os.path.expanduser('~')
path = os.path.join(path, ".config", "bayrell_os")
os.makedirs(path, exist_ok=True)
file_name = os.path.join(path, "settings.json")
return file_name
def loadItems(self):
file_name = self.getSettingsFileName()
file_content = ""
try:
if os.path.exists(file_name):
with open(file_name) as file:
file_content = file.read()
file.close()
settings = json.loads(file_content)
connections = settings["connections"]
for connection in connections:
data = Connection()
data.connection_name = connection["connection_name"]
data.host = connection["host"]
data.port = connection["port"]
data.username = connection["username"]
data.password = connection["password"]
item = QListWidgetItem(data.connection_name)
item.setData(1, data)
self.listWidget.addItem(item)
finally:
pass
pass
def saveItems(self):
connections = []
for row in range(self.listWidget.count()):
item = self.listWidget.item(row)
data = item.data(1)
connection = {
"connection_name": data.connection_name,
"host": data.host,
"port": data.port,
"username": data.username,
"password": data.password,
}
connections.append(connection)
settings = {
"connections": connections
}
text = json.dumps(settings, indent=2)
file_name = self.getSettingsFileName()
with open(file_name, "w") as file:
file.write(text)
file.close()
pass
def onAboutClick(self):
dlg = AboutDialog()
result = dlg.exec()
def onAddClick(self):
self.show_edit_connection_dialog()
self.saveItems()
def onEditClick(self):
items = self.listWidget.selectedIndexes()
if len(items) > 0:
self.show_edit_connection_dialog( self.listWidget.item(items[0].row()) )
self.saveItems()
def onDeleteClick(self):
delete_msg = "Are you sure want to delete selected items?"
result = QMessageBox.question(self, "Delete selected items",
delete_msg, QMessageBox.Yes, QMessageBox.No)
if result == QMessageBox.Yes:
items = self.listWidget.selectedIndexes()
for item in items:
row = item.row()
self.listWidget.takeItem(row)
self.saveItems()
def onConnectClick(self):
items = self.listWidget.selectedIndexes()
if len(items) > 0:
row = items[0].row()
item = self.listWidget.item(row)
data = item.data(1)
web_browser = WebBrowser(self)
web_browser.connectToServer(data)
pass
def onExitClick(self):
quit_msg = "Are you sure want to exit from the app?"
result = QMessageBox.question(self, "Exit from the app",
quit_msg, QMessageBox.Yes, QMessageBox.No)
if result == QMessageBox.Yes:
self.close()
def run():
app = QApplication(sys.argv)
main_window = MainWindow()
main_window.show()
sys.exit(app.exec())
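# --- Illustrative sketch (not part of the original application) --------------
# The WebBrowser window above works by opening an SSH tunnel to port 80 on the
# remote machine and pointing QWebEngineView at the forwarded local port. The
# tunnel half of that pattern, stripped of the GUI, looks roughly like this
# (the function name and arguments are illustrative only):
def example_open_tunnel(host, username, password, ssh_port=22):
    """Open the same kind of tunnel WebBrowser.sshConnect() builds and return
    it together with the local URL to browse. The caller must call .stop()."""
    tunnel = SSHTunnelForwarder(
        host,
        ssh_port=ssh_port,
        ssh_username=username,
        ssh_password=password,
        remote_bind_address=('127.0.0.1', 80),
        local_bind_address=('127.0.0.1', ),  # OS picks a free local port
    )
    tunnel.start()
    return tunnel, "http://127.0.0.1:" + str(tunnel.local_bind_port) + "/"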
|
wrap_function_multiprocess.py
|
# STDLIB
import sys
from typing import Any
# EXT
import multiprocess # type: ignore
# OWN
try:
from .wrap_helper import WrapHelper, raise_exception
except ImportError: # pragma: no cover
# Import for local DocTest
from wrap_helper import WrapHelper, raise_exception # type: ignore # pragma: no cover
class Timeout(object):
"""Wrap a function and add a timeout (limit) attribute to it.
Instances of this class are automatically generated by the add_timeout
function defined above. Wrapping a function allows asynchronous calls
to be made and termination of execution after a timeout has passed.
"""
def __init__(self, wrap_helper: WrapHelper) -> None:
"""Initialize instance in preparation for being called."""
self.wrap_helper = wrap_helper
self.__name__ = self.wrap_helper.wrapped.__name__
self.__doc__ = self.wrap_helper.wrapped.__doc__
self.__process = None # type: multiprocess.Process
self.__parent_conn = None # type: multiprocess.Pipe
def __call__(self) -> Any:
"""Execute the embedded function object asynchronously.
The function given to the constructor is transparently called and
requires that "ready" be intermittently polled. If and when it is
True, the "value" property may then be checked for returned data.
"""
self.__parent_conn, self.wrap_helper.child_conn = multiprocess.Pipe(duplex=False)
self.__process = multiprocess.Process(target=_target, args=[self.wrap_helper])
# daemonic process must not have subprocess - we need that for nested decorators
self.__process.daemon = False
self.__process.start()
if not self.wrap_helper.dec_hard_timeout:
self.wait_until_process_started()
if self.__parent_conn.poll(self.wrap_helper.dec_timeout_float):
return self.value
else:
self.cancel()
def cancel(self) -> None:
"""Terminate any possible execution of the embedded function."""
if self.__process.is_alive(): # pragma: no cover # we can not produce that state - its just a security measure
self.__process.terminate()
self.__process.join(timeout=1.0)
self.__parent_conn.close()
raise_exception(self.wrap_helper.timeout_exception, self.wrap_helper.exception_message)
def wait_until_process_started(self) -> None:
self.__parent_conn.recv()
@property
def value(self) -> Any:
exception_occured, result = self.__parent_conn.recv()
# when self.__parent_conn.recv() exits, maybe __process is still alive,
# then it might zombie the process. so join it explicitly
self.__process.join(timeout=1.0)
self.__parent_conn.close()
if exception_occured:
raise result
else:
return result
def _target(wrap_helper: WrapHelper) -> None:
"""Run a function with arguments and return output via a pipe.
This is a helper function for the Process created in Timeout. It runs
the function with positional arguments and keyword arguments and then
    returns the function's output by way of a pipe. If an exception gets
raised, it is returned to Timeout to be raised by the value property.
"""
# noinspection PyBroadException
try:
if not wrap_helper.dec_hard_timeout:
wrap_helper.child_conn.send("started")
exception_occured = False
wrap_helper.child_conn.send((exception_occured, wrap_helper.wrapped(*wrap_helper.args, **wrap_helper.kwargs)))
except Exception:
exception_occured = True
wrap_helper.child_conn.send((exception_occured, sys.exc_info()[1]))
# except BrokenPipeError:
finally:
wrap_helper.child_conn.close()
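# --- Illustrative sketch (not part of the original module) -------------------
# The Timeout wrapper above reduces to a simple pattern: run the wrapped
# callable in a child process, send (exception_occurred, result) back through a
# one-way pipe, and poll the parent end with a timeout, terminating the child
# if nothing arrives in time. A minimal standalone version of that pattern,
# using the same `multiprocess` package (names below are illustrative only):
def _example_runner(conn, func, args, kwargs):
    """Runs in the child process: call func and ship the outcome back."""
    try:
        conn.send((False, func(*args, **kwargs)))
    except Exception as exc:
        conn.send((True, exc))
    finally:
        conn.close()
def example_run_with_timeout(func, timeout, *args, **kwargs):
    """Call func(*args, **kwargs) in a child process; raise TimeoutError if it
    does not deliver a result within `timeout` seconds."""
    parent_conn, child_conn = multiprocess.Pipe(duplex=False)
    proc = multiprocess.Process(target=_example_runner,
                                args=(child_conn, func, args, kwargs))
    proc.start()
    if not parent_conn.poll(timeout):
        proc.terminate()
        proc.join(timeout=1.0)
        raise TimeoutError('call timed out after {0} seconds'.format(timeout))
    raised, value = parent_conn.recv()
    proc.join(timeout=1.0)
    parent_conn.close()
    if raised:
        raise value
    return value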
|
modellist_controller.py
|
from functools import partial
from kivy.properties import StringProperty
from kivy.clock import Clock
from kivymd.uix.list import OneLineListItem, OneLineAvatarIconListItem, TwoLineAvatarIconListItem, MDList, IconRightWidget
from kivymd.uix.dialog import MDDialog
from kivymd.uix.textfield import MDTextField
from kivymd.uix.button import MDFlatButton
from tesseractXplore.app import get_app
from tesseractXplore.app.screens import HOME_SCREEN_ONLINE
from tesseractXplore.controllers import Controller
from tesseractXplore.widgets import LeftCheckbox
class CustomOneLineListItem(OneLineListItem):
icon = StringProperty()
class ModelListController(Controller):
""" Controller class to manage image metadata screen """
def __init__(self, screen, **kwargs):
self.screen = screen
self.layout = MDList()
self.checked_models = None
self.modelinfos = {}
self.screen.model_selection_button.bind(on_release=self.set_model_btn)
self.screen.show_all_chk.bind(active=partial(self.thread_set_list))
def thread_set_list(self, *args, text="", search=False):
import threading
# TODO: Why is threading still blocking the ui and the pb not working?
if not args[0].active:
self.layout.clear_widgets()
self.screen.modellist.clear_widgets()
self.screen.modellist.add_widget(self.layout)
return
self.ocr_single_event = threading.Thread(target=self.process_set_list, args=(args),
kwargs={'text': text,
'search': search})
        self.ocr_single_event.daemon = True
self.ocr_single_event.start()
return
def process_set_list(self, *args, text="", search=False):
from kivymd.uix.progressbar import MDProgressBar
pb = MDProgressBar(type="determinate", running_duration=1, catching_duration=1.5)
status_bar = get_app().modellist_controller.screen.status_bar
status_bar.clear_widgets()
status_bar.add_widget(pb)
pb.start()
Clock.schedule_once(partial(self.set_list, self, *args, text=text, search=search))
pb.stop()
def set_list(self, *args, text="", search=False):
''' Lists all installed models '''
def add_item(model):
description = self.modelinfos.get(model).get('description','')
description = 'No description' if description == '' else description
item = TwoLineAvatarIconListItem(
text=model,
secondary_text= description,
on_release=partial(self.set_model, model),
size_hint=(None, None),
size=(600,1)
)
if model not in self.checked_models:
self.checked_models[model] = False
item.add_widget(LeftCheckbox(active=self.checked_models[model]))
item.add_widget(IconRightWidget(icon='file-edit', on_release=partial(self.edit_description, model, description)))
self.layout.add_widget(item)
if self.checked_models is None:
self.checked_models = {}
for model in list(get_app().modelinformations.get_modelinfos().keys()):
self.checked_models[model] = False
else:
self.chk_active_models()
self.layout.clear_widgets()
self.screen.modellist.clear_widgets()
self.modelinfos = get_app().modelinformations.get_modelinfos()
for model in list(self.modelinfos.keys()):
if model == "osd": continue
if self.screen.show_all_chk.active and len(text) == 0:
add_item(model)
if search and len(text) > 1:
if self.screen.exact_match_chk.active:
if text == model[:len(text)]:
add_item(model)
else:
textparts = text.split(" ")
if sum([True if textpart.lower() in model.lower() else False for textpart in textparts]) == len(
textparts):
add_item(model)
elif sum([True if textpart.lower() in " ".join(self.modelinfos.get(model).get('tags', [''])).lower() else False for textpart in textparts]) == len(
textparts):
add_item(model)
self.screen.modellist.add_widget(self.layout)
def edit_description(self, model, description, instance, *args):
def close_dialog(instance, *args):
instance.parent.parent.parent.parent.dismiss()
dialog = MDDialog(title=f"Edit the description of {model}",
type='custom',
auto_dismiss=False,
content_cls=MDTextField(text=description,mode="rectangle"),
buttons=[
MDFlatButton(
text="SAVE", on_release=partial(self.save_description, model, instance)
),
MDFlatButton(
text="DISCARD", on_release=close_dialog
),
],
)
if get_app()._platform not in ['win32', 'win64']:
# TODO: Focus function seems buggy in win
dialog.content_cls.focused = True
dialog.open()
def save_description(self, model, model_instance, dialog_instance, *args):
dialog_instance.parent.parent.parent.parent.dismiss()
model_instance.parent.parent.children[2].children[1].text = dialog_instance.parent.parent.parent.children[2].children[0].text
modelinfo = get_app().modelinformations.get_modelinfos().get(model)
modelinfo['description'] = dialog_instance.parent.parent.parent.children[2].children[0].text
get_app().modelinformations.update_modelinformations(model, modelinfo)
get_app().modelinformations.write_modelinfos()
def chk_active_models(self):
for model in self.layout.children:
self.checked_models[model.children[2].children[2].text] = model.children[1].children[0].active
def set_model_btn(self, instance, *args):
self.set_model("")
def set_model(self, model, *args):
selected_models = []
if model != "": selected_models.append(model)
self.chk_active_models()
for chk_model, state in self.checked_models.items():
if state and chk_model != model:
selected_models.append(chk_model)
if get_app().home_screen == HOME_SCREEN_ONLINE:
get_app().tesseract_online_controller.screen.model.set_item('Model: ' + '+'.join(selected_models))
else:
get_app().tesseract_controller.screen.model.set_item('Model: ' + '+'.join(selected_models))
get_app().switch_screen(get_app().home_screen)
|
test_variable.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author : YongJie-Xie
@Contact : fsswxyj@qq.com
@DateTime : 0000-00-00 00:00
@Description : Tests for the thread-synchronized variable classes
@FileName : test_variable.py
@License : MIT License
@ProjectName : Py3Scripts
@Software : PyCharm
@Version : 1.0
"""
import time
from threading import Thread
from basic import Logger, SyncVariable, GlobalSyncVariable
logger = Logger('test_variable', simplify=False)
def accumulate(target, number):
for num in range(1, number + 1):
target.variable += num
logger.info('[%s] num: %s, now: %s', target.__class__.__name__, num, target)
@logger.warning('Testing SyncVariable Object.')
def test_sync_variable():
class TestSyncVariable(SyncVariable):
def __init__(self):
super().__init__(0)
thread_list = []
for i in range(3):
thread_list.append(Thread(target=accumulate, args=(TestSyncVariable(), 10)))
for thread in thread_list:
thread.start()
for thread in thread_list:
thread.join()
@logger.warning('Testing GlobalSyncVariable Object.')
def test_global_sync_variable():
class TestGlobalSyncVariable(GlobalSyncVariable):
def __init__(self):
super().__init__(0)
thread_list = []
for i in range(3):
thread_list.append(Thread(target=accumulate, args=(TestGlobalSyncVariable(), 10)))
for thread in thread_list:
thread.start()
for thread in thread_list:
thread.join()
def main():
test_sync_variable()
time.sleep(1)
test_global_sync_variable()
if __name__ == '__main__':
main()
|
hyper-rsync.py
|
#!/usr/bin/env python3
from subprocess import call
import sys
import os
from threading import Thread
from queue import Queue
queue = Queue()
num = 9  # number of worker threads
def stream(i, q, dest="/tmp/sync_dir_B/"):
    """Worker loop: pull file paths off the queue and rsync each one."""
    while True:
        fullpath = q.get()
        print("Running rsync on %s" % fullpath)
        cmd = "rsync -av %s %s" % (fullpath, dest)
        status = call(cmd, shell=True)
        #if status != 0:
        #    print("Stream Failed")
        #    sys.exit(1)
        q.task_done()
def controller():
    # spawn N worker pool threads
    for i in range(num):
        worker = Thread(target=stream, args=(i, queue))
        worker.daemon = True
        worker.start()
    # populate the queue with files to copy
    for dirpath, dirnames, filenames in os.walk("/tmp/sync_dir_A"):
        for file in filenames:
            path = os.path.join(dirpath, file)
            print(path)
            queue.put(path)
    print("Main Thread Waiting")
    queue.join()
    print("Done")
if __name__ == "__main__":
    import time
    start = time.time()
    controller()
    print("finished in %s" % (time.time() - start))
# Purpose unknown; looks like a queue-based worker pool.
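# Note on the pattern above (added commentary, not from the original script):
# every queue.get() in stream() is paired with queue.task_done(), so the
# queue.join() call in controller() only returns once every queued path has
# been rsynced; the workers are daemon threads, so they do not keep the
# process alive after the main thread finishes.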
|
managers.py
|
#
# Module providing the `SyncManager` class for dealing
# with shared objects
#
# multiprocessing/managers.py
# https://github.com/lotapp/cpython3/blob/master/Lib/multiprocessing/managers.py
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
__all__ = ['BaseManager', 'SyncManager', 'BaseProxy', 'Token']
#
# Imports
#
import sys
import threading
import array
import queue
import time
from traceback import format_exc
from . import connection
from .context import reduction, get_spawning_popen, ProcessError
from . import pool
from . import process
from . import util
from . import get_context
#
# Register some things for pickling
#
def reduce_array(a):
return array.array, (a.typecode, a.tobytes())
reduction.register(array.array, reduce_array)
view_types = [
type(getattr({}, name)()) for name in ('items', 'keys', 'values')
]
if view_types[0] is not list: # only needed in Py3.0
def rebuild_as_list(obj):
return list, (list(obj), )
for view_type in view_types:
reduction.register(view_type, rebuild_as_list)
#
# Type for identifying shared objects
#
class Token(object):
'''
    Type to uniquely identify a shared object
'''
__slots__ = ('typeid', 'address', 'id')
def __init__(self, typeid, address, id):
(self.typeid, self.address, self.id) = (typeid, address, id)
def __getstate__(self):
return (self.typeid, self.address, self.id)
def __setstate__(self, state):
(self.typeid, self.address, self.id) = state
def __repr__(self):
return '%s(typeid=%r, address=%r, id=%r)' % \
(self.__class__.__name__, self.typeid, self.address, self.id)
#
# Function for communication with a manager's server process
#
def dispatch(c, id, methodname, args=(), kwds={}):
'''
Send a message to manager using connection `c` and return response
'''
c.send((id, methodname, args, kwds))
kind, result = c.recv()
if kind == '#RETURN':
return result
raise convert_to_error(kind, result)
def convert_to_error(kind, result):
if kind == '#ERROR':
return result
elif kind in ('#TRACEBACK', '#UNSERIALIZABLE'):
if not isinstance(result, str):
raise TypeError(
"Result {0!r} (kind '{1}') type is {2}, not str".format(
result, kind, type(result)))
if kind == '#UNSERIALIZABLE':
return RemoteError('Unserializable message: %s\n' % result)
else:
return RemoteError(result)
else:
return ValueError('Unrecognized message type {!r}'.format(kind))
class RemoteError(Exception):
def __str__(self):
return ('\n' + '-' * 75 + '\n' + str(self.args[0]) + '-' * 75)
#
# Functions for finding the method names of an object
#
def all_methods(obj):
'''
Return a list of names of methods of `obj`
'''
temp = []
for name in dir(obj):
func = getattr(obj, name)
if callable(func):
temp.append(name)
return temp
def public_methods(obj):
'''
Return a list of names of methods of `obj` which do not start with '_'
'''
return [name for name in all_methods(obj) if name[0] != '_']
#
# Server which is run in a process controlled by a manager
#
class Server(object):
'''
Server class which runs in a process controlled by a manager object
'''
public = [
'shutdown', 'create', 'accept_connection', 'get_methods', 'debug_info',
'number_of_objects', 'dummy', 'incref', 'decref'
]
def __init__(self, registry, address, authkey, serializer):
if not isinstance(authkey, bytes):
raise TypeError("Authkey {0!r} is type {1!s}, not bytes".format(
authkey, type(authkey)))
self.registry = registry
self.authkey = process.AuthenticationString(authkey)
Listener, Client = listener_client[serializer]
# do authentication later
self.listener = Listener(address=address, backlog=16)
self.address = self.listener.address
self.id_to_obj = {'0': (None, ())}
self.id_to_refcount = {}
self.id_to_local_proxy_obj = {}
self.mutex = threading.Lock()
def serve_forever(self):
'''
Run the server forever
'''
self.stop_event = threading.Event()
process.current_process()._manager_server = self
try:
accepter = threading.Thread(target=self.accepter)
accepter.daemon = True
accepter.start()
try:
while not self.stop_event.is_set():
self.stop_event.wait(1)
except (KeyboardInterrupt, SystemExit):
pass
finally:
if sys.stdout != sys.__stdout__: # what about stderr?
util.debug('resetting stdout, stderr')
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
sys.exit(0)
def accepter(self):
while True:
try:
c = self.listener.accept()
except OSError:
continue
t = threading.Thread(target=self.handle_request, args=(c, ))
t.daemon = True
t.start()
def handle_request(self, c):
'''
Handle a new connection
'''
funcname = result = request = None
try:
connection.deliver_challenge(c, self.authkey)
connection.answer_challenge(c, self.authkey)
request = c.recv()
ignore, funcname, args, kwds = request
assert funcname in self.public, '%r unrecognized' % funcname
func = getattr(self, funcname)
except Exception:
msg = ('#TRACEBACK', format_exc())
else:
try:
result = func(c, *args, **kwds)
except Exception:
msg = ('#TRACEBACK', format_exc())
else:
msg = ('#RETURN', result)
try:
c.send(msg)
except Exception as e:
try:
c.send(('#TRACEBACK', format_exc()))
except Exception:
pass
util.info('Failure to send message: %r', msg)
util.info(' ... request was %r', request)
util.info(' ... exception was %r', e)
c.close()
def serve_client(self, conn):
'''
Handle requests from the proxies in a particular process/thread
'''
util.debug('starting server thread to service %r',
threading.current_thread().name)
recv = conn.recv
send = conn.send
id_to_obj = self.id_to_obj
while not self.stop_event.is_set():
try:
methodname = obj = None
request = recv()
ident, methodname, args, kwds = request
try:
obj, exposed, gettypeid = id_to_obj[ident]
except KeyError as ke:
try:
obj, exposed, gettypeid = \
self.id_to_local_proxy_obj[ident]
except KeyError as second_ke:
raise ke
if methodname not in exposed:
raise AttributeError(
'method %r of %r object is not in exposed=%r' %
(methodname, type(obj), exposed))
function = getattr(obj, methodname)
try:
res = function(*args, **kwds)
except Exception as e:
msg = ('#ERROR', e)
else:
typeid = gettypeid and gettypeid.get(methodname, None)
if typeid:
rident, rexposed = self.create(conn, typeid, res)
token = Token(typeid, self.address, rident)
msg = ('#PROXY', (rexposed, token))
else:
msg = ('#RETURN', res)
except AttributeError:
if methodname is None:
msg = ('#TRACEBACK', format_exc())
else:
try:
fallback_func = self.fallback_mapping[methodname]
result = fallback_func(self, conn, ident, obj, *args,
**kwds)
msg = ('#RETURN', result)
except Exception:
msg = ('#TRACEBACK', format_exc())
except EOFError:
util.debug('got EOF -- exiting thread serving %r',
threading.current_thread().name)
sys.exit(0)
except Exception:
msg = ('#TRACEBACK', format_exc())
try:
try:
send(msg)
except Exception as e:
send(('#UNSERIALIZABLE', format_exc()))
except Exception as e:
util.info('exception in thread serving %r',
threading.current_thread().name)
util.info(' ... message was %r', msg)
util.info(' ... exception was %r', e)
conn.close()
sys.exit(1)
def fallback_getvalue(self, conn, ident, obj):
return obj
def fallback_str(self, conn, ident, obj):
return str(obj)
def fallback_repr(self, conn, ident, obj):
return repr(obj)
fallback_mapping = {
'__str__': fallback_str,
'__repr__': fallback_repr,
'#GETVALUE': fallback_getvalue
}
def dummy(self, c):
pass
def debug_info(self, c):
'''
Return some info --- useful to spot problems with refcounting
'''
# Perhaps include debug info about 'c'?
with self.mutex:
result = []
keys = list(self.id_to_refcount.keys())
keys.sort()
for ident in keys:
if ident != '0':
result.append(' %s: refcount=%s\n %s' %
(ident, self.id_to_refcount[ident],
str(self.id_to_obj[ident][0])[:75]))
return '\n'.join(result)
def number_of_objects(self, c):
'''
Number of shared objects
'''
# Doesn't use (len(self.id_to_obj) - 1) as we shouldn't count ident='0'
return len(self.id_to_refcount)
def shutdown(self, c):
'''
Shutdown this process
'''
try:
util.debug('manager received shutdown message')
c.send(('#RETURN', None))
except:
import traceback
traceback.print_exc()
finally:
self.stop_event.set()
def create(self, c, typeid, *args, **kwds):
'''
Create a new shared object and return its id
'''
with self.mutex:
callable, exposed, method_to_typeid, proxytype = \
self.registry[typeid]
if callable is None:
if kwds or (len(args) != 1):
raise ValueError(
"Without callable, must have one non-keyword argument")
obj = args[0]
else:
obj = callable(*args, **kwds)
if exposed is None:
exposed = public_methods(obj)
if method_to_typeid is not None:
if not isinstance(method_to_typeid, dict):
raise TypeError(
"Method_to_typeid {0!r}: type {1!s}, not dict".format(
method_to_typeid, type(method_to_typeid)))
exposed = list(exposed) + list(method_to_typeid)
ident = '%x' % id(obj) # convert to string because xmlrpclib
# only has 32 bit signed integers
util.debug('%r callable returned object with id %r', typeid, ident)
self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid)
if ident not in self.id_to_refcount:
self.id_to_refcount[ident] = 0
self.incref(c, ident)
return ident, tuple(exposed)
def get_methods(self, c, token):
'''
Return the methods of the shared object indicated by token
'''
return tuple(self.id_to_obj[token.id][1])
def accept_connection(self, c, name):
'''
Spawn a new thread to serve this connection
'''
threading.current_thread().name = name
c.send(('#RETURN', None))
self.serve_client(c)
def incref(self, c, ident):
with self.mutex:
try:
self.id_to_refcount[ident] += 1
except KeyError as ke:
# If no external references exist but an internal (to the
# manager) still does and a new external reference is created
# from it, restore the manager's tracking of it from the
# previously stashed internal ref.
if ident in self.id_to_local_proxy_obj:
self.id_to_refcount[ident] = 1
self.id_to_obj[ident] = \
self.id_to_local_proxy_obj[ident]
obj, exposed, gettypeid = self.id_to_obj[ident]
util.debug('Server re-enabled tracking & INCREF %r', ident)
else:
raise ke
def decref(self, c, ident):
if ident not in self.id_to_refcount and \
ident in self.id_to_local_proxy_obj:
util.debug('Server DECREF skipping %r', ident)
return
with self.mutex:
if self.id_to_refcount[ident] <= 0:
raise AssertionError(
"Id {0!s} ({1!r}) has refcount {2:n}, not 1+".format(
ident, self.id_to_obj[ident],
self.id_to_refcount[ident]))
self.id_to_refcount[ident] -= 1
if self.id_to_refcount[ident] == 0:
del self.id_to_refcount[ident]
if ident not in self.id_to_refcount:
# Two-step process in case the object turns out to contain other
# proxy objects (e.g. a managed list of managed lists).
# Otherwise, deleting self.id_to_obj[ident] would trigger the
# deleting of the stored value (another managed object) which would
# in turn attempt to acquire the mutex that is already held here.
self.id_to_obj[ident] = (None, (), None) # thread-safe
util.debug('disposing of obj with id %r', ident)
with self.mutex:
del self.id_to_obj[ident]
#
# Class to represent state of a manager
#
class State(object):
__slots__ = ['value']
INITIAL = 0
STARTED = 1
SHUTDOWN = 2
#
# Mapping from serializer name to Listener and Client types
#
listener_client = {
'pickle': (connection.Listener, connection.Client),
'xmlrpclib': (connection.XmlListener, connection.XmlClient)
}
#
# Definition of BaseManager
#
class BaseManager(object):
'''
    Base class for managers
'''
_registry = {}
_Server = Server
def __init__(self,
address=None,
authkey=None,
serializer='pickle',
ctx=None):
if authkey is None:
authkey = process.current_process().authkey
self._address = address # XXX not final address if eg ('', 0)
self._authkey = process.AuthenticationString(authkey)
self._state = State()
self._state.value = State.INITIAL
self._serializer = serializer
self._Listener, self._Client = listener_client[serializer]
self._ctx = ctx or get_context()
def get_server(self):
'''
Return server object with serve_forever() method and address attribute
'''
if self._state.value != State.INITIAL:
if self._state.value == State.STARTED:
raise ProcessError("Already started server")
elif self._state.value == State.SHUTDOWN:
raise ProcessError("Manager has shut down")
else:
raise ProcessError("Unknown state {!r}".format(
self._state.value))
return Server(self._registry, self._address, self._authkey,
self._serializer)
def connect(self):
'''
Connect manager object to the server process
'''
Listener, Client = listener_client[self._serializer]
conn = Client(self._address, authkey=self._authkey)
dispatch(conn, None, 'dummy')
self._state.value = State.STARTED
def start(self, initializer=None, initargs=()):
'''
Spawn a server process for this manager object
'''
if self._state.value != State.INITIAL:
if self._state.value == State.STARTED:
raise ProcessError("Already started server")
elif self._state.value == State.SHUTDOWN:
raise ProcessError("Manager has shut down")
else:
raise ProcessError("Unknown state {!r}".format(
self._state.value))
if initializer is not None and not callable(initializer):
raise TypeError('initializer must be a callable')
# pipe over which we will retrieve address of server
reader, writer = connection.Pipe(duplex=False)
# spawn process which runs a server
self._process = self._ctx.Process(
target=type(self)._run_server,
args=(self._registry, self._address, self._authkey,
self._serializer, writer, initializer, initargs),
)
ident = ':'.join(str(i) for i in self._process._identity)
self._process.name = type(self).__name__ + '-' + ident
self._process.start()
# get address of server
writer.close()
self._address = reader.recv()
reader.close()
# register a finalizer
self._state.value = State.STARTED
self.shutdown = util.Finalize(
self,
type(self)._finalize_manager,
args=(self._process, self._address, self._authkey, self._state,
self._Client),
exitpriority=0)
@classmethod
def _run_server(cls,
registry,
address,
authkey,
serializer,
writer,
initializer=None,
initargs=()):
'''
Create a server, report its address and run it
'''
if initializer is not None:
initializer(*initargs)
# create server
server = cls._Server(registry, address, authkey, serializer)
# inform parent process of the server's address
writer.send(server.address)
writer.close()
# run the manager
util.info('manager serving at %r', server.address)
server.serve_forever()
def _create(self, typeid, *args, **kwds):
'''
Create a new shared object; return the token and exposed tuple
'''
assert self._state.value == State.STARTED, 'server not yet started'
conn = self._Client(self._address, authkey=self._authkey)
try:
id, exposed = dispatch(conn, None, 'create', (typeid, ) + args,
kwds)
finally:
conn.close()
return Token(typeid, self._address, id), exposed
def join(self, timeout=None):
'''
Join the manager process (if it has been spawned)
'''
if self._process is not None:
self._process.join(timeout)
if not self._process.is_alive():
self._process = None
def _debug_info(self):
'''
Return some info about the servers shared objects and connections
'''
conn = self._Client(self._address, authkey=self._authkey)
try:
return dispatch(conn, None, 'debug_info')
finally:
conn.close()
def _number_of_objects(self):
'''
Return the number of shared objects
'''
conn = self._Client(self._address, authkey=self._authkey)
try:
return dispatch(conn, None, 'number_of_objects')
finally:
conn.close()
def __enter__(self):
if self._state.value == State.INITIAL:
self.start()
if self._state.value != State.STARTED:
if self._state.value == State.INITIAL:
raise ProcessError("Unable to start server")
elif self._state.value == State.SHUTDOWN:
raise ProcessError("Manager has shut down")
else:
raise ProcessError("Unknown state {!r}".format(
self._state.value))
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.shutdown()
@staticmethod
def _finalize_manager(process, address, authkey, state, _Client):
'''
Shutdown the manager process; will be registered as a finalizer
'''
if process.is_alive():
util.info('sending shutdown message to manager')
try:
conn = _Client(address, authkey=authkey)
try:
dispatch(conn, None, 'shutdown')
finally:
conn.close()
except Exception:
pass
process.join(timeout=1.0)
if process.is_alive():
util.info('manager still alive')
if hasattr(process, 'terminate'):
util.info('trying to `terminate()` manager process')
process.terminate()
process.join(timeout=0.1)
if process.is_alive():
util.info('manager still alive after terminate')
state.value = State.SHUTDOWN
try:
del BaseProxy._address_to_local[address]
except KeyError:
pass
@property
def address(self):
return self._address
@classmethod
def register(cls,
typeid,
callable=None,
proxytype=None,
exposed=None,
method_to_typeid=None,
create_method=True):
'''
Register a typeid with the manager type
'''
if '_registry' not in cls.__dict__:
cls._registry = cls._registry.copy()
if proxytype is None:
proxytype = AutoProxy
exposed = exposed or getattr(proxytype, '_exposed_', None)
method_to_typeid = method_to_typeid or \
getattr(proxytype, '_method_to_typeid_', None)
if method_to_typeid:
for key, value in list(method_to_typeid.items()): # isinstance?
assert type(key) is str, '%r is not a string' % key
assert type(value) is str, '%r is not a string' % value
cls._registry[typeid] = (callable, exposed, method_to_typeid,
proxytype)
if create_method:
def temp(self, *args, **kwds):
util.debug('requesting creation of a shared %r object', typeid)
token, exp = self._create(typeid, *args, **kwds)
proxy = proxytype(
token,
self._serializer,
manager=self,
authkey=self._authkey,
exposed=exp)
conn = self._Client(token.address, authkey=self._authkey)
dispatch(conn, None, 'decref', (token.id, ))
return proxy
temp.__name__ = typeid
setattr(cls, typeid, temp)
#
# Subclass of set which get cleared after a fork
#
class ProcessLocalSet(set):
def __init__(self):
util.register_after_fork(self, lambda obj: obj.clear())
def __reduce__(self):
return type(self), ()
#
# Definition of BaseProxy
#
class BaseProxy(object):
'''
A base for proxies of shared objects
'''
_address_to_local = {}
_mutex = util.ForkAwareThreadLock()
def __init__(self,
token,
serializer,
manager=None,
authkey=None,
exposed=None,
incref=True,
manager_owned=False):
with BaseProxy._mutex:
tls_idset = BaseProxy._address_to_local.get(token.address, None)
if tls_idset is None:
tls_idset = util.ForkAwareLocal(), ProcessLocalSet()
BaseProxy._address_to_local[token.address] = tls_idset
# self._tls is used to record the connection used by this
# thread to communicate with the manager at token.address
self._tls = tls_idset[0]
# self._idset is used to record the identities of all shared
# objects for which the current process owns references and
# which are in the manager at token.address
self._idset = tls_idset[1]
self._token = token
self._id = self._token.id
self._manager = manager
self._serializer = serializer
self._Client = listener_client[serializer][1]
# Should be set to True only when a proxy object is being created
# on the manager server; primary use case: nested proxy objects.
# RebuildProxy detects when a proxy is being created on the manager
# and sets this value appropriately.
self._owned_by_manager = manager_owned
if authkey is not None:
self._authkey = process.AuthenticationString(authkey)
elif self._manager is not None:
self._authkey = self._manager._authkey
else:
self._authkey = process.current_process().authkey
if incref:
self._incref()
util.register_after_fork(self, BaseProxy._after_fork)
def _connect(self):
util.debug('making connection to manager')
name = process.current_process().name
if threading.current_thread().name != 'MainThread':
name += '|' + threading.current_thread().name
conn = self._Client(self._token.address, authkey=self._authkey)
dispatch(conn, None, 'accept_connection', (name, ))
self._tls.connection = conn
def _callmethod(self, methodname, args=(), kwds={}):
'''
Try to call a method of the referrent and return a copy of the result
'''
try:
conn = self._tls.connection
except AttributeError:
util.debug('thread %r does not own a connection',
threading.current_thread().name)
self._connect()
conn = self._tls.connection
conn.send((self._id, methodname, args, kwds))
kind, result = conn.recv()
if kind == '#RETURN':
return result
elif kind == '#PROXY':
exposed, token = result
proxytype = self._manager._registry[token.typeid][-1]
token.address = self._token.address
proxy = proxytype(
token,
self._serializer,
manager=self._manager,
authkey=self._authkey,
exposed=exposed)
conn = self._Client(token.address, authkey=self._authkey)
dispatch(conn, None, 'decref', (token.id, ))
return proxy
raise convert_to_error(kind, result)
def _getvalue(self):
'''
Get a copy of the value of the referent
'''
return self._callmethod('#GETVALUE')
def _incref(self):
if self._owned_by_manager:
util.debug('owned_by_manager skipped INCREF of %r', self._token.id)
return
conn = self._Client(self._token.address, authkey=self._authkey)
dispatch(conn, None, 'incref', (self._id, ))
util.debug('INCREF %r', self._token.id)
self._idset.add(self._id)
state = self._manager and self._manager._state
self._close = util.Finalize(
self,
BaseProxy._decref,
args=(self._token, self._authkey, state, self._tls, self._idset,
self._Client),
exitpriority=10)
@staticmethod
def _decref(token, authkey, state, tls, idset, _Client):
idset.discard(token.id)
# check whether manager is still alive
if state is None or state.value == State.STARTED:
# tell manager this process no longer cares about referent
try:
util.debug('DECREF %r', token.id)
conn = _Client(token.address, authkey=authkey)
dispatch(conn, None, 'decref', (token.id, ))
except Exception as e:
util.debug('... decref failed %s', e)
else:
util.debug('DECREF %r -- manager already shutdown', token.id)
# check whether we can close this thread's connection because
# the process owns no more references to objects for this manager
if not idset and hasattr(tls, 'connection'):
util.debug('thread %r has no more proxies so closing conn',
threading.current_thread().name)
tls.connection.close()
del tls.connection
def _after_fork(self):
self._manager = None
try:
self._incref()
except Exception as e:
# the proxy may just be for a manager which has shutdown
util.info('incref failed: %s' % e)
def __reduce__(self):
kwds = {}
if get_spawning_popen() is not None:
kwds['authkey'] = self._authkey
if getattr(self, '_isauto', False):
kwds['exposed'] = self._exposed_
return (RebuildProxy, (AutoProxy, self._token, self._serializer,
kwds))
else:
return (RebuildProxy, (type(self), self._token, self._serializer,
kwds))
def __deepcopy__(self, memo):
return self._getvalue()
def __repr__(self):
return '<%s object, typeid %r at %#x>' % \
(type(self).__name__, self._token.typeid, id(self))
def __str__(self):
'''
Return representation of the referent (or a fall-back if that fails)
'''
try:
return self._callmethod('__repr__')
except Exception:
return repr(self)[:-1] + "; '__str__()' failed>"
#
# Function used for unpickling
#
def RebuildProxy(func, token, serializer, kwds):
'''
Function used for unpickling proxy objects.
'''
server = getattr(process.current_process(), '_manager_server', None)
if server and server.address == token.address:
util.debug('Rebuild a proxy owned by manager, token=%r', token)
kwds['manager_owned'] = True
if token.id not in server.id_to_local_proxy_obj:
server.id_to_local_proxy_obj[token.id] = \
server.id_to_obj[token.id]
incref = (kwds.pop('incref', True)
and not getattr(process.current_process(), '_inheriting', False))
return func(token, serializer, incref=incref, **kwds)
#
# Functions to create proxies and proxy types
#
def MakeProxyType(name, exposed, _cache={}):
'''
Return a proxy type whose methods are given by `exposed`
'''
exposed = tuple(exposed)
try:
return _cache[(name, exposed)]
except KeyError:
pass
dic = {}
for meth in exposed:
exec(
'''def %s(self, *args, **kwds):
return self._callmethod(%r, args, kwds)''' % (meth, meth), dic)
ProxyType = type(name, (BaseProxy, ), dic)
ProxyType._exposed_ = exposed
_cache[(name, exposed)] = ProxyType
return ProxyType
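# Illustrative note (not part of the original module): MakeProxyType builds a
# BaseProxy subclass whose listed methods simply forward their name and
# arguments to _callmethod(), e.g.
#   QueueLikeProxy = MakeProxyType('QueueLikeProxy', ('put', 'get', 'qsize'))
# Instances still need a Token and a serializer, so in practice such proxy
# types are produced indirectly via AutoProxy and BaseManager.register().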
def AutoProxy(token,
serializer,
manager=None,
authkey=None,
exposed=None,
incref=True):
'''
Return an auto-proxy for `token`
'''
_Client = listener_client[serializer][1]
if exposed is None:
conn = _Client(token.address, authkey=authkey)
try:
exposed = dispatch(conn, None, 'get_methods', (token, ))
finally:
conn.close()
if authkey is None and manager is not None:
authkey = manager._authkey
if authkey is None:
authkey = process.current_process().authkey
ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed)
proxy = ProxyType(
token, serializer, manager=manager, authkey=authkey, incref=incref)
proxy._isauto = True
return proxy
#
# Types/callables which we will register with SyncManager
#
class Namespace(object):
def __init__(self, **kwds):
self.__dict__.update(kwds)
def __repr__(self):
items = list(self.__dict__.items())
temp = []
for name, value in items:
if not name.startswith('_'):
temp.append('%s=%r' % (name, value))
temp.sort()
return '%s(%s)' % (self.__class__.__name__, ', '.join(temp))
class Value(object):
def __init__(self, typecode, value, lock=True):
self._typecode = typecode
self._value = value
def get(self):
return self._value
def set(self, value):
self._value = value
def __repr__(self):
return '%s(%r, %r)' % (type(self).__name__, self._typecode,
self._value)
value = property(get, set)
def Array(typecode, sequence, lock=True):
return array.array(typecode, sequence)
#
# Proxy types used by SyncManager
#
class IteratorProxy(BaseProxy):
_exposed_ = ('__next__', 'send', 'throw', 'close')
def __iter__(self):
return self
def __next__(self, *args):
return self._callmethod('__next__', args)
def send(self, *args):
return self._callmethod('send', args)
def throw(self, *args):
return self._callmethod('throw', args)
def close(self, *args):
return self._callmethod('close', args)
class AcquirerProxy(BaseProxy):
_exposed_ = ('acquire', 'release')
def acquire(self, blocking=True, timeout=None):
args = (blocking, ) if timeout is None else (blocking, timeout)
return self._callmethod('acquire', args)
def release(self):
return self._callmethod('release')
def __enter__(self):
return self._callmethod('acquire')
def __exit__(self, exc_type, exc_val, exc_tb):
return self._callmethod('release')
class ConditionProxy(AcquirerProxy):
_exposed_ = ('acquire', 'release', 'wait', 'notify', 'notify_all')
def wait(self, timeout=None):
return self._callmethod('wait', (timeout, ))
def notify(self, n=1):
return self._callmethod('notify', (n, ))
def notify_all(self):
return self._callmethod('notify_all')
def wait_for(self, predicate, timeout=None):
result = predicate()
if result:
return result
if timeout is not None:
endtime = time.monotonic() + timeout
else:
endtime = None
waittime = None
while not result:
if endtime is not None:
waittime = endtime - time.monotonic()
if waittime <= 0:
break
self.wait(waittime)
result = predicate()
return result
class EventProxy(BaseProxy):
_exposed_ = ('is_set', 'set', 'clear', 'wait')
def is_set(self):
return self._callmethod('is_set')
def set(self):
return self._callmethod('set')
def clear(self):
return self._callmethod('clear')
def wait(self, timeout=None):
return self._callmethod('wait', (timeout, ))
class BarrierProxy(BaseProxy):
_exposed_ = ('__getattribute__', 'wait', 'abort', 'reset')
def wait(self, timeout=None):
return self._callmethod('wait', (timeout, ))
def abort(self):
return self._callmethod('abort')
def reset(self):
return self._callmethod('reset')
@property
def parties(self):
return self._callmethod('__getattribute__', ('parties', ))
@property
def n_waiting(self):
return self._callmethod('__getattribute__', ('n_waiting', ))
@property
def broken(self):
return self._callmethod('__getattribute__', ('broken', ))
class NamespaceProxy(BaseProxy):
_exposed_ = ('__getattribute__', '__setattr__', '__delattr__')
def __getattr__(self, key):
if key[0] == '_':
return object.__getattribute__(self, key)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__getattribute__', (key, ))
def __setattr__(self, key, value):
if key[0] == '_':
return object.__setattr__(self, key, value)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__setattr__', (key, value))
def __delattr__(self, key):
if key[0] == '_':
return object.__delattr__(self, key)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__delattr__', (key, ))
class ValueProxy(BaseProxy):
_exposed_ = ('get', 'set')
def get(self):
return self._callmethod('get')
def set(self, value):
return self._callmethod('set', (value, ))
value = property(get, set)
BaseListProxy = MakeProxyType(
'BaseListProxy',
('__add__', '__contains__', '__delitem__', '__getitem__', '__len__',
'__mul__', '__reversed__', '__rmul__', '__setitem__', 'append', 'count',
'extend', 'index', 'insert', 'pop', 'remove', 'reverse', 'sort',
'__imul__'))
class ListProxy(BaseListProxy):
def __iadd__(self, value):
self._callmethod('extend', (value, ))
return self
def __imul__(self, value):
self._callmethod('__imul__', (value, ))
return self
DictProxy = MakeProxyType(
'DictProxy', ('__contains__', '__delitem__', '__getitem__', '__len__',
'__setitem__', 'clear', 'copy', 'get', 'has_key', 'items',
'keys', 'pop', 'popitem', 'setdefault', 'update', 'values'))
ArrayProxy = MakeProxyType('ArrayProxy',
('__len__', '__getitem__', '__setitem__'))
BasePoolProxy = MakeProxyType('PoolProxy', (
'apply',
'apply_async',
'close',
'imap',
'imap_unordered',
'join',
'map',
'map_async',
'starmap',
'starmap_async',
'terminate',
))
BasePoolProxy._method_to_typeid_ = {
'apply_async': 'AsyncResult',
'map_async': 'AsyncResult',
'starmap_async': 'AsyncResult',
'imap': 'Iterator',
'imap_unordered': 'Iterator'
}
class PoolProxy(BasePoolProxy):
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.terminate()
#
# Definition of SyncManager
#
class SyncManager(BaseManager):
'''
    Subclass of `BaseManager` which supports a number of shared object types.
    The types registered are those intended for the synchronization
    of threads, plus `dict`, `list` and `Namespace`.
    The `multiprocessing.Manager()` function creates started instances of
    this class.
'''
SyncManager.register('Queue', queue.Queue)
SyncManager.register('JoinableQueue', queue.Queue)
SyncManager.register('Event', threading.Event, EventProxy)
SyncManager.register('Lock', threading.Lock, AcquirerProxy)
SyncManager.register('RLock', threading.RLock, AcquirerProxy)
SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy)
SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore,
AcquirerProxy)
SyncManager.register('Condition', threading.Condition, ConditionProxy)
SyncManager.register('Barrier', threading.Barrier, BarrierProxy)
SyncManager.register('Pool', pool.Pool, PoolProxy)
SyncManager.register('list', list, ListProxy)
SyncManager.register('dict', dict, DictProxy)
SyncManager.register('Value', Value, ValueProxy)
SyncManager.register('Array', Array, ArrayProxy)
SyncManager.register('Namespace', Namespace, NamespaceProxy)
# types returned by methods of PoolProxy
SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False)
SyncManager.register('AsyncResult', create_method=False)
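# --- Illustrative sketch (not part of the original module) -------------------
# BaseManager.register() is the extension point for sharing custom objects: the
# server process holds the real instance and clients receive proxies whose
# method calls travel through dispatch(). The names below are illustrative.
class _ExampleCounter(object):
    """Plain object that will live inside the manager's server process."""
    def __init__(self):
        self._value = 0
    def increment(self):
        self._value += 1
        return self._value
class ExampleCounterManager(BaseManager):
    pass
ExampleCounterManager.register('Counter', _ExampleCounter)
# Typical client-side usage (sketch):
#   with ExampleCounterManager() as manager:   # spawns the server process
#       counter = manager.Counter()            # AutoProxy around the server-side object
#       counter.increment()                    # runs in the server, returns 1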
|
cpuinfo.py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2014-2021 Matthew Brennan Jones <matthew.brennan.jones@gmail.com>
# Py-cpuinfo gets CPU info with pure Python 2 & 3
# It uses the MIT License
# It is hosted at: https://github.com/workhorsy/py-cpuinfo
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
CPUINFO_VERSION = (8, 0, 0)
CPUINFO_VERSION_STRING = '.'.join([str(n) for n in CPUINFO_VERSION])
import os, sys
import platform
import multiprocessing
import ctypes
IS_PY2 = sys.version_info[0] == 2
CAN_CALL_CPUID_IN_SUBPROCESS = True
g_trace = None
class Trace(object):
def __init__(self, is_active, is_stored_in_string):
self._is_active = is_active
if not self._is_active:
return
from datetime import datetime
if IS_PY2:
from cStringIO import StringIO
else:
from io import StringIO
if is_stored_in_string:
self._output = StringIO()
else:
date = datetime.now().strftime("%Y-%m-%d_%H-%M-%S-%f")
self._output = open('cpuinfo_trace_{0}.trace'.format(date), 'w')
self._stdout = StringIO()
self._stderr = StringIO()
self._err = None
def header(self, msg):
if not self._is_active: return
from inspect import stack
frame = stack()[1]
file = frame[1]
line = frame[2]
self._output.write("{0} ({1} {2})\n".format(msg, file, line))
self._output.flush()
def success(self):
if not self._is_active: return
from inspect import stack
frame = stack()[1]
file = frame[1]
line = frame[2]
self._output.write("Success ... ({0} {1})\n\n".format(file, line))
self._output.flush()
def fail(self, msg):
if not self._is_active: return
from inspect import stack
frame = stack()[1]
file = frame[1]
line = frame[2]
if isinstance(msg, str):
msg = ''.join(['\t' + line for line in msg.split('\n')]) + '\n'
self._output.write(msg)
self._output.write("Failed ... ({0} {1})\n\n".format(file, line))
self._output.flush()
elif isinstance(msg, Exception):
from traceback import format_exc
err_string = format_exc()
self._output.write("\tFailed ... ({0} {1})\n".format(file, line))
self._output.write(''.join(['\t\t{0}\n'.format(n) for n in err_string.split('\n')]) + '\n')
self._output.flush()
def command_header(self, msg):
if not self._is_active: return
from inspect import stack
frame = stack()[3]
file = frame[1]
line = frame[2]
self._output.write("\t{0} ({1} {2})\n".format(msg, file, line))
self._output.flush()
def command_output(self, msg, output):
if not self._is_active: return
self._output.write("\t\t{0}\n".format(msg))
self._output.write(''.join(['\t\t\t{0}\n'.format(n) for n in output.split('\n')]) + '\n')
self._output.flush()
def keys(self, keys, info, new_info):
if not self._is_active: return
from inspect import stack
frame = stack()[2]
file = frame[1]
line = frame[2]
# List updated keys
self._output.write("\tChanged keys ({0} {1})\n".format(file, line))
changed_keys = [key for key in keys if key in info and key in new_info and info[key] != new_info[key]]
if changed_keys:
for key in changed_keys:
self._output.write('\t\t{0}: {1} to {2}\n'.format(key, info[key], new_info[key]))
else:
self._output.write('\t\tNone\n')
# List new keys
self._output.write("\tNew keys ({0} {1})\n".format(file, line))
new_keys = [key for key in keys if key in new_info and key not in info]
if new_keys:
for key in new_keys:
self._output.write('\t\t{0}: {1}\n'.format(key, new_info[key]))
else:
self._output.write('\t\tNone\n')
self._output.write('\n')
self._output.flush()
def write(self, msg):
if not self._is_active: return
self._output.write(msg + '\n')
self._output.flush()
def to_dict(self, info, is_fail):
return {
'output' : self._output.getvalue(),
'stdout' : self._stdout.getvalue(),
'stderr' : self._stderr.getvalue(),
'info' : info,
'err' : self._err,
'is_fail' : is_fail
}
class DataSource(object):
bits = platform.architecture()[0]
cpu_count = multiprocessing.cpu_count()
is_windows = platform.system().lower() == 'windows'
arch_string_raw = platform.machine()
uname_string_raw = platform.uname()[5]
can_cpuid = True
@staticmethod
def has_proc_cpuinfo():
return os.path.exists('/proc/cpuinfo')
@staticmethod
def has_dmesg():
return len(_program_paths('dmesg')) > 0
@staticmethod
def has_var_run_dmesg_boot():
uname = platform.system().strip().strip('"').strip("'").strip().lower()
return 'linux' in uname and os.path.exists('/var/run/dmesg.boot')
@staticmethod
def has_cpufreq_info():
return len(_program_paths('cpufreq-info')) > 0
@staticmethod
def has_sestatus():
return len(_program_paths('sestatus')) > 0
@staticmethod
def has_sysctl():
return len(_program_paths('sysctl')) > 0
@staticmethod
def has_isainfo():
return len(_program_paths('isainfo')) > 0
@staticmethod
def has_kstat():
return len(_program_paths('kstat')) > 0
@staticmethod
def has_sysinfo():
uname = platform.system().strip().strip('"').strip("'").strip().lower()
is_beos = 'beos' in uname or 'haiku' in uname
return is_beos and len(_program_paths('sysinfo')) > 0
@staticmethod
def has_lscpu():
return len(_program_paths('lscpu')) > 0
@staticmethod
def has_ibm_pa_features():
return len(_program_paths('lsprop')) > 0
@staticmethod
def has_wmic():
returncode, output = _run_and_get_stdout(['wmic', 'os', 'get', 'Version'])
return returncode == 0 and len(output) > 0
@staticmethod
def cat_proc_cpuinfo():
return _run_and_get_stdout(['cat', '/proc/cpuinfo'])
@staticmethod
def cpufreq_info():
return _run_and_get_stdout(['cpufreq-info'])
@staticmethod
def sestatus_b():
return _run_and_get_stdout(['sestatus', '-b'])
@staticmethod
def dmesg_a():
return _run_and_get_stdout(['dmesg', '-a'])
@staticmethod
def cat_var_run_dmesg_boot():
return _run_and_get_stdout(['cat', '/var/run/dmesg.boot'])
@staticmethod
def sysctl_machdep_cpu_hw_cpufrequency():
return _run_and_get_stdout(['sysctl', 'machdep.cpu', 'hw.cpufrequency'])
@staticmethod
def isainfo_vb():
return _run_and_get_stdout(['isainfo', '-vb'])
@staticmethod
def kstat_m_cpu_info():
return _run_and_get_stdout(['kstat', '-m', 'cpu_info'])
@staticmethod
def sysinfo_cpu():
return _run_and_get_stdout(['sysinfo', '-cpu'])
@staticmethod
def lscpu():
return _run_and_get_stdout(['lscpu'])
@staticmethod
def ibm_pa_features():
import glob
ibm_features = glob.glob('/proc/device-tree/cpus/*/ibm,pa-features')
if ibm_features:
return _run_and_get_stdout(['lsprop', ibm_features[0]])
@staticmethod
def wmic_cpu():
return _run_and_get_stdout(['wmic', 'cpu', 'get', 'Name,CurrentClockSpeed,L2CacheSize,L3CacheSize,Description,Caption,Manufacturer', '/format:list'])
@staticmethod
def winreg_processor_brand():
processor_brand = _read_windows_registry_key(r"Hardware\Description\System\CentralProcessor\0", "ProcessorNameString")
return processor_brand.strip()
@staticmethod
def winreg_vendor_id_raw():
vendor_id_raw = _read_windows_registry_key(r"Hardware\Description\System\CentralProcessor\0", "VendorIdentifier")
return vendor_id_raw
@staticmethod
def winreg_arch_string_raw():
arch_string_raw = _read_windows_registry_key(r"SYSTEM\CurrentControlSet\Control\Session Manager\Environment", "PROCESSOR_ARCHITECTURE")
return arch_string_raw
@staticmethod
def winreg_hz_actual():
hz_actual = _read_windows_registry_key(r"Hardware\Description\System\CentralProcessor\0", "~Mhz")
hz_actual = _to_decimal_string(hz_actual)
return hz_actual
@staticmethod
def winreg_feature_bits():
feature_bits = _read_windows_registry_key(r"Hardware\Description\System\CentralProcessor\0", "FeatureSet")
return feature_bits
def _program_paths(program_name):
paths = []
exts = filter(None, os.environ.get('PATHEXT', '').split(os.pathsep))
for p in os.environ['PATH'].split(os.pathsep):
p = os.path.join(p, program_name)
if os.access(p, os.X_OK):
paths.append(p)
for e in exts:
pext = p + e
if os.access(pext, os.X_OK):
paths.append(pext)
return paths
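# Illustrative behaviour (results depend entirely on the host's PATH/PATHEXT):
#   _program_paths('dmesg')            -> ['/usr/bin/dmesg'] on a typical Linux install
#   _program_paths('nonexistent-tool') -> []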
def _run_and_get_stdout(command, pipe_command=None):
from subprocess import Popen, PIPE
p1, p2, stdout_output, stderr_output = None, None, None, None
g_trace.command_header('Running command "' + ' '.join(command) + '" ...')
# Run the command normally
if not pipe_command:
p1 = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
# Run the command and pipe it into another command
else:
p2 = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
p1 = Popen(pipe_command, stdin=p2.stdout, stdout=PIPE, stderr=PIPE)
p2.stdout.close()
# Get the stdout and stderr
stdout_output, stderr_output = p1.communicate()
if not IS_PY2:
stdout_output = stdout_output.decode(encoding='UTF-8')
stderr_output = stderr_output.decode(encoding='UTF-8')
# Send the result to the logger
g_trace.command_output('return code:', str(p1.returncode))
g_trace.command_output('stdout:', stdout_output)
# Return the return code and stdout
return p1.returncode, stdout_output
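# Illustrative contract (assumes an 'echo' binary is on the PATH):
#   _run_and_get_stdout(['echo', 'hello']) -> (0, 'hello\n')
# The tuple is always (return code, decoded stdout); stderr is captured but discarded.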
def _read_windows_registry_key(key_name, field_name):
g_trace.command_header('Reading Registry key "{0}" field "{1}" ...'.format(key_name, field_name))
try:
import _winreg as winreg
except ImportError as err:
try:
import winreg
except ImportError as err:
pass
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, key_name)
value = winreg.QueryValueEx(key, field_name)[0]
winreg.CloseKey(key)
g_trace.command_output('value:', str(value))
return value
# Make sure we are running on a supported system
def _check_arch():
arch, bits = _parse_arch(DataSource.arch_string_raw)
if not arch in ['X86_32', 'X86_64', 'ARM_7', 'ARM_8',
'PPC_64', 'S390X', 'MIPS_32', 'MIPS_64']:
raise Exception("py-cpuinfo currently only works on X86 "
"and some ARM/PPC/S390X/MIPS CPUs.")
def _obj_to_b64(thing):
import pickle
import base64
a = thing
b = pickle.dumps(a)
c = base64.b64encode(b)
d = c.decode('utf8')
return d
def _b64_to_obj(thing):
import pickle
import base64
try:
a = base64.b64decode(thing)
b = pickle.loads(a)
return b
except:
return {}
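# Round-trip sketch: these two helpers shuttle picklable objects across the
# CPUID subprocess boundary as base64 text.
#   _b64_to_obj(_obj_to_b64({'flags': ['sse2']})) -> {'flags': ['sse2']}
#   _b64_to_obj('not valid data')                 -> {}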
def _utf_to_str(input):
if IS_PY2 and isinstance(input, unicode):
return input.encode('utf-8')
elif isinstance(input, list):
return [_utf_to_str(element) for element in input]
elif isinstance(input, dict):
return {_utf_to_str(key): _utf_to_str(value)
for key, value in input.items()}
else:
return input
def _copy_new_fields(info, new_info):
keys = [
'vendor_id_raw', 'hardware_raw', 'brand_raw', 'hz_advertised_friendly', 'hz_actual_friendly',
'hz_advertised', 'hz_actual', 'arch', 'bits', 'count',
'arch_string_raw', 'uname_string_raw',
'l2_cache_size', 'l2_cache_line_size', 'l2_cache_associativity',
'stepping', 'model', 'family',
'processor_type', 'flags',
'l3_cache_size', 'l1_data_cache_size', 'l1_instruction_cache_size'
]
g_trace.keys(keys, info, new_info)
# Update the keys with new values
for key in keys:
if new_info.get(key, None) and not info.get(key, None):
info[key] = new_info[key]
elif key == 'flags' and new_info.get('flags'):
for f in new_info['flags']:
if f not in info['flags']: info['flags'].append(f)
info['flags'].sort()
def _get_field_actual(cant_be_number, raw_string, field_names):
for line in raw_string.splitlines():
for field_name in field_names:
field_name = field_name.lower()
if ':' in line:
left, right = line.split(':', 1)
left = left.strip().lower()
right = right.strip()
if left == field_name and len(right) > 0:
if cant_be_number:
if not right.isdigit():
return right
else:
return right
return None
def _get_field(cant_be_number, raw_string, convert_to, default_value, *field_names):
retval = _get_field_actual(cant_be_number, raw_string, field_names)
# Convert the return value
if retval and convert_to:
try:
retval = convert_to(retval)
except:
retval = default_value
# Return the default if there is no return value
if retval is None:
retval = default_value
return retval
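# Illustrative use against "key : value" formatted text (field names are matched
# case-insensitively; the sample text below is made up):
#   raw = 'model name\t: Some CPU @ 2.20GHz\ncpu MHz\t\t: 2200.000'
#   _get_field(False, raw, None, '', 'model name') -> 'Some CPU @ 2.20GHz'
#   _get_field(False, raw, float, 0.0, 'cpu MHz')  -> 2200.0
#   _get_field(False, raw, int, 0, 'no such key')  -> 0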
def _to_decimal_string(ticks):
try:
# Convert to string
ticks = '{0}'.format(ticks)
# Sometimes ',' is used as a decimal separator
ticks = ticks.replace(',', '.')
# Strip off non numbers and decimal places
ticks = "".join(n for n in ticks if n.isdigit() or n=='.').strip()
if ticks == '':
ticks = '0'
# Add decimal if missing
if '.' not in ticks:
ticks = '{0}.0'.format(ticks)
# Remove trailing zeros
ticks = ticks.rstrip('0')
# Add one trailing zero for empty right side
if ticks.endswith('.'):
ticks = '{0}0'.format(ticks)
# Make sure the number can be converted to a float
ticks = float(ticks)
ticks = '{0}'.format(ticks)
return ticks
except:
return '0.0'
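# Illustrative conversions (any unparseable input collapses to '0.0'):
#   _to_decimal_string('2800')    -> '2800.0'
#   _to_decimal_string('1.60GHz') -> '1.6'
#   _to_decimal_string('')        -> '0.0'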
def _hz_short_to_full(ticks, scale):
try:
# Make sure the number can be converted to a float
ticks = float(ticks)
ticks = '{0}'.format(ticks)
# Scale the numbers
hz = ticks.lstrip('0')
old_index = hz.index('.')
hz = hz.replace('.', '')
hz = hz.ljust(scale + old_index+1, '0')
new_index = old_index + scale
hz = '{0}.{1}'.format(hz[:new_index], hz[new_index:])
left, right = hz.split('.')
left, right = int(left), int(right)
return (left, right)
except:
return (0, 0)
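# Illustrative conversions from (decimal string, power-of-ten scale) to an
# integer (hz, fractional-hz) pair:
#   _hz_short_to_full('2.8', 9)    -> (2800000000, 0)   # 2.8 GHz
#   _hz_short_to_full('2200.0', 6) -> (2200000000, 0)   # 2200 MHz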
def _hz_friendly_to_full(hz_string):
try:
hz_string = hz_string.strip().lower()
hz, scale = (None, None)
if hz_string.endswith('ghz'):
scale = 9
elif hz_string.endswith('mhz'):
scale = 6
elif hz_string.endswith('hz'):
scale = 0
hz = "".join(n for n in hz_string if n.isdigit() or n=='.').strip()
if not '.' in hz:
hz += '.0'
hz, scale = _hz_short_to_full(hz, scale)
return (hz, scale)
except:
return (0, 0)
def _hz_short_to_friendly(ticks, scale):
try:
# Get the raw Hz as a string
left, right = _hz_short_to_full(ticks, scale)
result = '{0}.{1}'.format(left, right)
# Get the location of the dot, and remove said dot
dot_index = result.index('.')
result = result.replace('.', '')
# Get the Hz symbol and scale
symbol = "Hz"
scale = 0
if dot_index > 9:
symbol = "GHz"
scale = 9
elif dot_index > 6:
symbol = "MHz"
scale = 6
elif dot_index > 3:
symbol = "KHz"
scale = 3
# Get the Hz with the dot at the new scaled point
result = '{0}.{1}'.format(result[:-scale-1], result[-scale-1:])
# Format the ticks to have 4 numbers after the decimal
# and remove any superfluous zeroes.
result = '{0:.4f} {1}'.format(float(result), symbol)
result = result.rstrip('0')
return result
except:
return '0.0000 Hz'
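# Illustrative friendly formatting of the same inputs:
#   _hz_short_to_friendly('2.8', 9)    -> '2.8000 GHz'
#   _hz_short_to_friendly('2200.0', 6) -> '2.2000 GHz'
#   _hz_short_to_friendly('bogus', 9)  -> '0.0000 Hz'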
def _to_friendly_bytes(input):
import re
if not input:
return input
input = "{0}".format(input)
formats = {
r"^[0-9]+B$" : 'B',
r"^[0-9]+K$" : 'KB',
r"^[0-9]+M$" : 'MB',
r"^[0-9]+G$" : 'GB'
}
for pattern, friendly_size in formats.items():
if re.match(pattern, input):
return "{0} {1}".format(input[ : -1].strip(), friendly_size)
return input
def _friendly_bytes_to_int(friendly_bytes):
input = friendly_bytes.lower()
formats = {
'gb' : 1024 * 1024 * 1024,
'mb' : 1024 * 1024,
'kb' : 1024,
'g' : 1024 * 1024 * 1024,
'm' : 1024 * 1024,
'k' : 1024,
'b' : 1,
}
try:
for pattern, multiplier in formats.items():
if input.endswith(pattern):
return int(input.split(pattern)[0].strip()) * multiplier
except Exception as err:
pass
return friendly_bytes
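# Illustrative conversions for the two cache-size helpers above (results shown
# for Python 3, where dicts keep insertion order):
#   _to_friendly_bytes('512K')       -> '512 KB'
#   _friendly_bytes_to_int('512 KB') -> 524288
#   _friendly_bytes_to_int('8 MB')   -> 8388608
# Anything unparseable is passed back unchanged rather than raising.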
def _parse_cpu_brand_string(cpu_string):
# Just return 0 if the processor brand does not have the Hz
if not 'hz' in cpu_string.lower():
return ('0.0', 0)
hz = cpu_string.lower()
scale = 0
if hz.endswith('mhz'):
scale = 6
elif hz.endswith('ghz'):
scale = 9
if '@' in hz:
hz = hz.split('@')[1]
else:
hz = hz.rsplit(None, 1)[1]
hz = hz.rstrip('mhz').rstrip('ghz').strip()
hz = _to_decimal_string(hz)
return (hz, scale)
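# Illustrative parses (the brand strings below are made-up examples):
#   _parse_cpu_brand_string('SomeVendor CPU X100 @ 2.20GHz') -> ('2.2', 9)
#   _parse_cpu_brand_string('SomeVendor CPU without speed')  -> ('0.0', 0)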
def _parse_cpu_brand_string_dx(cpu_string):
import re
# Find all the strings inside brackets ()
starts = [m.start() for m in re.finditer(r"\(", cpu_string)]
ends = [m.start() for m in re.finditer(r"\)", cpu_string)]
insides = {k: v for k, v in zip(starts, ends)}
insides = [cpu_string[start+1 : end] for start, end in insides.items()]
# Find all the fields
vendor_id, stepping, model, family = (None, None, None, None)
for inside in insides:
for pair in inside.split(','):
pair = [n.strip() for n in pair.split(':')]
if len(pair) > 1:
name, value = pair[0], pair[1]
if name == 'origin':
vendor_id = value.strip('"')
elif name == 'stepping':
					# int() with base 16 accepts an optional '0x' prefix, so no manual
					# stripping is needed (lstrip('0x') would mangle values such as '0x0')
					stepping = int(value, 16)
				elif name == 'model':
					model = int(value, 16)
				elif name in ['fam', 'family']:
					family = int(value, 16)
# Find the Processor Brand
# Strip off extra strings in brackets at end
brand = cpu_string.strip()
is_working = True
while is_working:
is_working = False
for inside in insides:
full = "({0})".format(inside)
if brand.endswith(full):
brand = brand[ :-len(full)].strip()
is_working = True
# Find the Hz in the brand string
hz_brand, scale = _parse_cpu_brand_string(brand)
# Find Hz inside brackets () after the brand string
if hz_brand == '0.0':
for inside in insides:
hz = inside
for entry in ['GHz', 'MHz', 'Hz']:
if entry in hz:
hz = "CPU @ " + hz[ : hz.find(entry) + len(entry)]
hz_brand, scale = _parse_cpu_brand_string(hz)
break
return (hz_brand, scale, brand, vendor_id, stepping, model, family)
def _parse_dmesg_output(output):
try:
# Get all the dmesg lines that might contain a CPU string
lines = output.split(' CPU0:')[1:] + \
output.split(' CPU1:')[1:] + \
output.split(' CPU:')[1:] + \
output.split('\nCPU0:')[1:] + \
output.split('\nCPU1:')[1:] + \
output.split('\nCPU:')[1:]
lines = [l.split('\n')[0].strip() for l in lines]
# Convert the lines to CPU strings
cpu_strings = [_parse_cpu_brand_string_dx(l) for l in lines]
# Find the CPU string that has the most fields
best_string = None
highest_count = 0
for cpu_string in cpu_strings:
count = sum([n is not None for n in cpu_string])
if count > highest_count:
highest_count = count
best_string = cpu_string
# If no CPU string was found, return {}
if not best_string:
return {}
hz_actual, scale, processor_brand, vendor_id, stepping, model, family = best_string
# Origin
if ' Origin=' in output:
fields = output[output.find(' Origin=') : ].split('\n')[0]
fields = fields.strip().split()
fields = [n.strip().split('=') for n in fields]
fields = [{n[0].strip().lower() : n[1].strip()} for n in fields]
for field in fields:
name = list(field.keys())[0]
value = list(field.values())[0]
if name == 'origin':
vendor_id = value.strip('"')
elif name == 'stepping':
					stepping = int(value, 16)  # base 16 accepts the '0x' prefix directly
				elif name == 'model':
					model = int(value, 16)
				elif name in ['fam', 'family']:
					family = int(value, 16)
# Features
flag_lines = []
for category in [' Features=', ' Features2=', ' AMD Features=', ' AMD Features2=']:
if category in output:
flag_lines.append(output.split(category)[1].split('\n')[0])
flags = []
for line in flag_lines:
line = line.split('<')[1].split('>')[0].lower()
for flag in line.split(','):
flags.append(flag)
flags.sort()
# Convert from GHz/MHz string to Hz
hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
# If advertised hz not found, use the actual hz
if hz_advertised == '0.0':
scale = 6
hz_advertised = _to_decimal_string(hz_actual)
info = {
'vendor_id_raw' : vendor_id,
'brand_raw' : processor_brand,
'stepping' : stepping,
'model' : model,
'family' : family,
'flags' : flags
}
if hz_advertised and hz_advertised != '0.0':
info['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale)
info['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, scale)
if hz_advertised and hz_advertised != '0.0':
info['hz_advertised'] = _hz_short_to_full(hz_advertised, scale)
info['hz_actual'] = _hz_short_to_full(hz_actual, scale)
return {k: v for k, v in info.items() if v}
except Exception as err:
g_trace.fail(err)
#raise
pass
return {}
def _parse_arch(arch_string_raw):
import re
arch, bits = None, None
arch_string_raw = arch_string_raw.lower()
# X86
if re.match(r'^i\d86$|^x86$|^x86_32$|^i86pc$|^ia32$|^ia-32$|^bepc$', arch_string_raw):
arch = 'X86_32'
bits = 32
elif re.match(r'^x64$|^x86_64$|^x86_64t$|^i686-64$|^amd64$|^ia64$|^ia-64$', arch_string_raw):
arch = 'X86_64'
bits = 64
# ARM
elif re.match(r'^armv8-a|aarch64|arm64$', arch_string_raw):
arch = 'ARM_8'
bits = 64
elif re.match(r'^armv7$|^armv7[a-z]$|^armv7-[a-z]$|^armv6[a-z]$', arch_string_raw):
arch = 'ARM_7'
bits = 32
elif re.match(r'^armv8$|^armv8[a-z]$|^armv8-[a-z]$', arch_string_raw):
arch = 'ARM_8'
bits = 32
# PPC
elif re.match(r'^ppc32$|^prep$|^pmac$|^powermac$', arch_string_raw):
arch = 'PPC_32'
bits = 32
elif re.match(r'^powerpc$|^ppc64$|^ppc64le$', arch_string_raw):
arch = 'PPC_64'
bits = 64
# SPARC
elif re.match(r'^sparc32$|^sparc$', arch_string_raw):
arch = 'SPARC_32'
bits = 32
elif re.match(r'^sparc64$|^sun4u$|^sun4v$', arch_string_raw):
arch = 'SPARC_64'
bits = 64
# S390X
elif re.match(r'^s390x$', arch_string_raw):
arch = 'S390X'
bits = 64
elif arch_string_raw == 'mips':
arch = 'MIPS_32'
bits = 32
elif arch_string_raw == 'mips64':
arch = 'MIPS_64'
bits = 64
return (arch, bits)
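# Illustrative mappings:
#   _parse_arch('x86_64')  -> ('X86_64', 64)
#   _parse_arch('aarch64') -> ('ARM_8', 64)
#   _parse_arch('riscv64') -> (None, None)   # unrecognised strings fall through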
def _is_bit_set(reg, bit):
mask = 1 << bit
is_set = reg & mask > 0
return is_set
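# Example: bit 5 of 0b100000 is set, bit 0 is not.
#   _is_bit_set(0b100000, 5) -> True
#   _is_bit_set(0b100000, 0) -> False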
def _is_selinux_enforcing(trace):
# Just return if the SE Linux Status Tool is not installed
if not DataSource.has_sestatus():
trace.fail('Failed to find sestatus.')
return False
# Run the sestatus, and just return if it failed to run
returncode, output = DataSource.sestatus_b()
if returncode != 0:
trace.fail('Failed to run sestatus. Skipping ...')
return False
# Figure out if explicitly in enforcing mode
for line in output.splitlines():
line = line.strip().lower()
if line.startswith("current mode:"):
if line.endswith("enforcing"):
return True
else:
return False
# Figure out if we can execute heap and execute memory
can_selinux_exec_heap = False
can_selinux_exec_memory = False
for line in output.splitlines():
line = line.strip().lower()
if line.startswith("allow_execheap") and line.endswith("on"):
can_selinux_exec_heap = True
elif line.startswith("allow_execmem") and line.endswith("on"):
can_selinux_exec_memory = True
trace.command_output('can_selinux_exec_heap:', can_selinux_exec_heap)
trace.command_output('can_selinux_exec_memory:', can_selinux_exec_memory)
return (not can_selinux_exec_heap or not can_selinux_exec_memory)
def _filter_dict_keys_with_empty_values(info):
# Filter out None, 0, "", (), {}, []
info = {k: v for k, v in info.items() if v}
# Filter out (0, 0)
info = {k: v for k, v in info.items() if v != (0, 0)}
# Filter out strings that start with "0.0"
info = {k: v for k, v in info.items() if not (type(v) == str and v.startswith('0.0'))}
return info
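# Illustrative filtering:
#   _filter_dict_keys_with_empty_values(
#       {'brand_raw': 'Some CPU', 'flags': [], 'hz_actual': (0, 0), 'model': 0})
#   -> {'brand_raw': 'Some CPU'}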
class ASM(object):
def __init__(self, restype=None, argtypes=(), machine_code=[]):
self.restype = restype
self.argtypes = argtypes
self.machine_code = machine_code
self.prochandle = None
self.mm = None
self.func = None
self.address = None
self.size = 0
def compile(self):
machine_code = bytes.join(b'', self.machine_code)
self.size = ctypes.c_size_t(len(machine_code))
if DataSource.is_windows:
# Allocate a memory segment the size of the machine code, and make it executable
size = len(machine_code)
# Alloc at least 1 page to ensure we own all pages that we want to change protection on
if size < 0x1000: size = 0x1000
MEM_COMMIT = ctypes.c_ulong(0x1000)
PAGE_READWRITE = ctypes.c_ulong(0x4)
pfnVirtualAlloc = ctypes.windll.kernel32.VirtualAlloc
pfnVirtualAlloc.restype = ctypes.c_void_p
self.address = pfnVirtualAlloc(None, ctypes.c_size_t(size), MEM_COMMIT, PAGE_READWRITE)
if not self.address:
raise Exception("Failed to VirtualAlloc")
# Copy the machine code into the memory segment
memmove = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t)(ctypes._memmove_addr)
if memmove(self.address, machine_code, size) < 0:
raise Exception("Failed to memmove")
# Enable execute permissions
PAGE_EXECUTE = ctypes.c_ulong(0x10)
old_protect = ctypes.c_ulong(0)
pfnVirtualProtect = ctypes.windll.kernel32.VirtualProtect
res = pfnVirtualProtect(ctypes.c_void_p(self.address), ctypes.c_size_t(size), PAGE_EXECUTE, ctypes.byref(old_protect))
if not res:
raise Exception("Failed VirtualProtect")
# Flush Instruction Cache
# First, get process Handle
if not self.prochandle:
pfnGetCurrentProcess = ctypes.windll.kernel32.GetCurrentProcess
pfnGetCurrentProcess.restype = ctypes.c_void_p
self.prochandle = ctypes.c_void_p(pfnGetCurrentProcess())
# Actually flush cache
res = ctypes.windll.kernel32.FlushInstructionCache(self.prochandle, ctypes.c_void_p(self.address), ctypes.c_size_t(size))
if not res:
raise Exception("Failed FlushInstructionCache")
else:
from mmap import mmap, MAP_PRIVATE, MAP_ANONYMOUS, PROT_WRITE, PROT_READ, PROT_EXEC
# Allocate a private and executable memory segment the size of the machine code
machine_code = bytes.join(b'', self.machine_code)
self.size = len(machine_code)
self.mm = mmap(-1, self.size, flags=MAP_PRIVATE | MAP_ANONYMOUS, prot=PROT_WRITE | PROT_READ | PROT_EXEC)
# Copy the machine code into the memory segment
self.mm.write(machine_code)
self.address = ctypes.addressof(ctypes.c_int.from_buffer(self.mm))
# Cast the memory segment into a function
functype = ctypes.CFUNCTYPE(self.restype, *self.argtypes)
self.func = functype(self.address)
def run(self):
# Call the machine code like a function
retval = self.func()
return retval
def free(self):
# Free the function memory segment
if DataSource.is_windows:
MEM_RELEASE = ctypes.c_ulong(0x8000)
ctypes.windll.kernel32.VirtualFree(ctypes.c_void_p(self.address), ctypes.c_size_t(0), MEM_RELEASE)
else:
self.mm.close()
self.prochandle = None
self.mm = None
self.func = None
self.address = None
self.size = 0
class CPUID(object):
def __init__(self, trace=None):
if trace == None:
trace = Trace(False, False)
# Figure out if SE Linux is on and in enforcing mode
self.is_selinux_enforcing = _is_selinux_enforcing(trace)
def _asm_func(self, restype=None, argtypes=(), machine_code=[]):
asm = ASM(restype, argtypes, machine_code)
asm.compile()
return asm
def _run_asm(self, *machine_code):
asm = ASM(ctypes.c_uint32, (), machine_code)
asm.compile()
retval = asm.run()
asm.free()
return retval
# http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID
def get_vendor_id(self):
# EBX
ebx = self._run_asm(
b"\x31\xC0", # xor eax,eax
b"\x0F\xA2" # cpuid
b"\x89\xD8" # mov ax,bx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
b"\x31\xC0", # xor eax,eax
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# EDX
edx = self._run_asm(
b"\x31\xC0", # xor eax,eax
b"\x0f\xa2" # cpuid
b"\x89\xD0" # mov ax,dx
b"\xC3" # ret
)
		# Each byte (8 bits) of EBX, EDX, and ECX is an ASCII character of the vendor name
vendor_id = []
for reg in [ebx, edx, ecx]:
for n in [0, 8, 16, 24]:
vendor_id.append(chr((reg >> n) & 0xFF))
vendor_id = ''.join(vendor_id)
return vendor_id
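	# Typical return values (they depend entirely on the host CPU): 'GenuineIntel'
	# on Intel parts, 'AuthenticAMD' on AMD parts. The 12 characters come from
	# EBX, EDX, ECX in that order, 4 bytes per register.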
# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
def get_info(self):
# EAX
eax = self._run_asm(
b"\xB8\x01\x00\x00\x00", # mov eax,0x1"
b"\x0f\xa2" # cpuid
b"\xC3" # ret
)
# Get the CPU info
stepping_id = (eax >> 0) & 0xF # 4 bits
model = (eax >> 4) & 0xF # 4 bits
family_id = (eax >> 8) & 0xF # 4 bits
processor_type = (eax >> 12) & 0x3 # 2 bits
extended_model_id = (eax >> 16) & 0xF # 4 bits
extended_family_id = (eax >> 20) & 0xFF # 8 bits
family = 0
if family_id in [15]:
family = extended_family_id + family_id
else:
family = family_id
if family_id in [6, 15]:
model = (extended_model_id << 4) + model
return {
'stepping' : stepping_id,
'model' : model,
'family' : family,
'processor_type' : processor_type
}
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000000h:_Get_Highest_Extended_Function_Supported
def get_max_extension_support(self):
# Check for extension support
max_extension_support = self._run_asm(
b"\xB8\x00\x00\x00\x80" # mov ax,0x80000000
b"\x0f\xa2" # cpuid
b"\xC3" # ret
)
return max_extension_support
# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
def get_flags(self, max_extension_support):
# EDX
edx = self._run_asm(
b"\xB8\x01\x00\x00\x00", # mov eax,0x1"
b"\x0f\xa2" # cpuid
b"\x89\xD0" # mov ax,dx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
b"\xB8\x01\x00\x00\x00", # mov eax,0x1"
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# Get the CPU flags
flags = {
'fpu' : _is_bit_set(edx, 0),
'vme' : _is_bit_set(edx, 1),
'de' : _is_bit_set(edx, 2),
'pse' : _is_bit_set(edx, 3),
'tsc' : _is_bit_set(edx, 4),
'msr' : _is_bit_set(edx, 5),
'pae' : _is_bit_set(edx, 6),
'mce' : _is_bit_set(edx, 7),
'cx8' : _is_bit_set(edx, 8),
'apic' : _is_bit_set(edx, 9),
#'reserved1' : _is_bit_set(edx, 10),
'sep' : _is_bit_set(edx, 11),
'mtrr' : _is_bit_set(edx, 12),
'pge' : _is_bit_set(edx, 13),
'mca' : _is_bit_set(edx, 14),
'cmov' : _is_bit_set(edx, 15),
'pat' : _is_bit_set(edx, 16),
'pse36' : _is_bit_set(edx, 17),
'pn' : _is_bit_set(edx, 18),
'clflush' : _is_bit_set(edx, 19),
#'reserved2' : _is_bit_set(edx, 20),
'dts' : _is_bit_set(edx, 21),
'acpi' : _is_bit_set(edx, 22),
'mmx' : _is_bit_set(edx, 23),
'fxsr' : _is_bit_set(edx, 24),
'sse' : _is_bit_set(edx, 25),
'sse2' : _is_bit_set(edx, 26),
'ss' : _is_bit_set(edx, 27),
'ht' : _is_bit_set(edx, 28),
'tm' : _is_bit_set(edx, 29),
'ia64' : _is_bit_set(edx, 30),
'pbe' : _is_bit_set(edx, 31),
'pni' : _is_bit_set(ecx, 0),
'pclmulqdq' : _is_bit_set(ecx, 1),
'dtes64' : _is_bit_set(ecx, 2),
'monitor' : _is_bit_set(ecx, 3),
'ds_cpl' : _is_bit_set(ecx, 4),
'vmx' : _is_bit_set(ecx, 5),
'smx' : _is_bit_set(ecx, 6),
'est' : _is_bit_set(ecx, 7),
'tm2' : _is_bit_set(ecx, 8),
'ssse3' : _is_bit_set(ecx, 9),
'cid' : _is_bit_set(ecx, 10),
#'reserved3' : _is_bit_set(ecx, 11),
'fma' : _is_bit_set(ecx, 12),
'cx16' : _is_bit_set(ecx, 13),
'xtpr' : _is_bit_set(ecx, 14),
'pdcm' : _is_bit_set(ecx, 15),
#'reserved4' : _is_bit_set(ecx, 16),
'pcid' : _is_bit_set(ecx, 17),
'dca' : _is_bit_set(ecx, 18),
'sse4_1' : _is_bit_set(ecx, 19),
'sse4_2' : _is_bit_set(ecx, 20),
'x2apic' : _is_bit_set(ecx, 21),
'movbe' : _is_bit_set(ecx, 22),
'popcnt' : _is_bit_set(ecx, 23),
'tscdeadline' : _is_bit_set(ecx, 24),
'aes' : _is_bit_set(ecx, 25),
'xsave' : _is_bit_set(ecx, 26),
'osxsave' : _is_bit_set(ecx, 27),
'avx' : _is_bit_set(ecx, 28),
'f16c' : _is_bit_set(ecx, 29),
'rdrnd' : _is_bit_set(ecx, 30),
'hypervisor' : _is_bit_set(ecx, 31)
}
# Get a list of only the flags that are true
flags = [k for k, v in flags.items() if v]
# http://en.wikipedia.org/wiki/CPUID#EAX.3D7.2C_ECX.3D0:_Extended_Features
if max_extension_support >= 7:
# EBX
ebx = self._run_asm(
b"\x31\xC9", # xor ecx,ecx
b"\xB8\x07\x00\x00\x00" # mov eax,7
b"\x0f\xa2" # cpuid
b"\x89\xD8" # mov ax,bx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
b"\x31\xC9", # xor ecx,ecx
b"\xB8\x07\x00\x00\x00" # mov eax,7
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# Get the extended CPU flags
extended_flags = {
#'fsgsbase' : _is_bit_set(ebx, 0),
#'IA32_TSC_ADJUST' : _is_bit_set(ebx, 1),
'sgx' : _is_bit_set(ebx, 2),
'bmi1' : _is_bit_set(ebx, 3),
'hle' : _is_bit_set(ebx, 4),
'avx2' : _is_bit_set(ebx, 5),
#'reserved' : _is_bit_set(ebx, 6),
'smep' : _is_bit_set(ebx, 7),
'bmi2' : _is_bit_set(ebx, 8),
'erms' : _is_bit_set(ebx, 9),
'invpcid' : _is_bit_set(ebx, 10),
'rtm' : _is_bit_set(ebx, 11),
'pqm' : _is_bit_set(ebx, 12),
#'FPU CS and FPU DS deprecated' : _is_bit_set(ebx, 13),
'mpx' : _is_bit_set(ebx, 14),
'pqe' : _is_bit_set(ebx, 15),
'avx512f' : _is_bit_set(ebx, 16),
'avx512dq' : _is_bit_set(ebx, 17),
'rdseed' : _is_bit_set(ebx, 18),
'adx' : _is_bit_set(ebx, 19),
'smap' : _is_bit_set(ebx, 20),
'avx512ifma' : _is_bit_set(ebx, 21),
'pcommit' : _is_bit_set(ebx, 22),
'clflushopt' : _is_bit_set(ebx, 23),
'clwb' : _is_bit_set(ebx, 24),
'intel_pt' : _is_bit_set(ebx, 25),
'avx512pf' : _is_bit_set(ebx, 26),
'avx512er' : _is_bit_set(ebx, 27),
'avx512cd' : _is_bit_set(ebx, 28),
'sha' : _is_bit_set(ebx, 29),
'avx512bw' : _is_bit_set(ebx, 30),
'avx512vl' : _is_bit_set(ebx, 31),
'prefetchwt1' : _is_bit_set(ecx, 0),
'avx512vbmi' : _is_bit_set(ecx, 1),
'umip' : _is_bit_set(ecx, 2),
'pku' : _is_bit_set(ecx, 3),
'ospke' : _is_bit_set(ecx, 4),
#'reserved' : _is_bit_set(ecx, 5),
'avx512vbmi2' : _is_bit_set(ecx, 6),
#'reserved' : _is_bit_set(ecx, 7),
'gfni' : _is_bit_set(ecx, 8),
'vaes' : _is_bit_set(ecx, 9),
'vpclmulqdq' : _is_bit_set(ecx, 10),
'avx512vnni' : _is_bit_set(ecx, 11),
'avx512bitalg' : _is_bit_set(ecx, 12),
#'reserved' : _is_bit_set(ecx, 13),
'avx512vpopcntdq' : _is_bit_set(ecx, 14),
#'reserved' : _is_bit_set(ecx, 15),
#'reserved' : _is_bit_set(ecx, 16),
#'mpx0' : _is_bit_set(ecx, 17),
#'mpx1' : _is_bit_set(ecx, 18),
#'mpx2' : _is_bit_set(ecx, 19),
#'mpx3' : _is_bit_set(ecx, 20),
#'mpx4' : _is_bit_set(ecx, 21),
'rdpid' : _is_bit_set(ecx, 22),
#'reserved' : _is_bit_set(ecx, 23),
#'reserved' : _is_bit_set(ecx, 24),
#'reserved' : _is_bit_set(ecx, 25),
#'reserved' : _is_bit_set(ecx, 26),
#'reserved' : _is_bit_set(ecx, 27),
#'reserved' : _is_bit_set(ecx, 28),
#'reserved' : _is_bit_set(ecx, 29),
'sgx_lc' : _is_bit_set(ecx, 30),
#'reserved' : _is_bit_set(ecx, 31)
}
# Get a list of only the flags that are true
extended_flags = [k for k, v in extended_flags.items() if v]
flags += extended_flags
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000001h:_Extended_Processor_Info_and_Feature_Bits
if max_extension_support >= 0x80000001:
# EBX
ebx = self._run_asm(
b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
b"\x0f\xa2" # cpuid
b"\x89\xD8" # mov ax,bx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# Get the extended CPU flags
extended_flags = {
'fpu' : _is_bit_set(ebx, 0),
'vme' : _is_bit_set(ebx, 1),
'de' : _is_bit_set(ebx, 2),
'pse' : _is_bit_set(ebx, 3),
'tsc' : _is_bit_set(ebx, 4),
'msr' : _is_bit_set(ebx, 5),
'pae' : _is_bit_set(ebx, 6),
'mce' : _is_bit_set(ebx, 7),
'cx8' : _is_bit_set(ebx, 8),
'apic' : _is_bit_set(ebx, 9),
#'reserved' : _is_bit_set(ebx, 10),
'syscall' : _is_bit_set(ebx, 11),
'mtrr' : _is_bit_set(ebx, 12),
'pge' : _is_bit_set(ebx, 13),
'mca' : _is_bit_set(ebx, 14),
'cmov' : _is_bit_set(ebx, 15),
'pat' : _is_bit_set(ebx, 16),
'pse36' : _is_bit_set(ebx, 17),
#'reserved' : _is_bit_set(ebx, 18),
'mp' : _is_bit_set(ebx, 19),
'nx' : _is_bit_set(ebx, 20),
#'reserved' : _is_bit_set(ebx, 21),
'mmxext' : _is_bit_set(ebx, 22),
'mmx' : _is_bit_set(ebx, 23),
'fxsr' : _is_bit_set(ebx, 24),
'fxsr_opt' : _is_bit_set(ebx, 25),
'pdpe1gp' : _is_bit_set(ebx, 26),
'rdtscp' : _is_bit_set(ebx, 27),
#'reserved' : _is_bit_set(ebx, 28),
'lm' : _is_bit_set(ebx, 29),
'3dnowext' : _is_bit_set(ebx, 30),
'3dnow' : _is_bit_set(ebx, 31),
'lahf_lm' : _is_bit_set(ecx, 0),
'cmp_legacy' : _is_bit_set(ecx, 1),
'svm' : _is_bit_set(ecx, 2),
'extapic' : _is_bit_set(ecx, 3),
'cr8_legacy' : _is_bit_set(ecx, 4),
'abm' : _is_bit_set(ecx, 5),
'sse4a' : _is_bit_set(ecx, 6),
'misalignsse' : _is_bit_set(ecx, 7),
'3dnowprefetch' : _is_bit_set(ecx, 8),
'osvw' : _is_bit_set(ecx, 9),
'ibs' : _is_bit_set(ecx, 10),
'xop' : _is_bit_set(ecx, 11),
'skinit' : _is_bit_set(ecx, 12),
'wdt' : _is_bit_set(ecx, 13),
#'reserved' : _is_bit_set(ecx, 14),
'lwp' : _is_bit_set(ecx, 15),
'fma4' : _is_bit_set(ecx, 16),
'tce' : _is_bit_set(ecx, 17),
#'reserved' : _is_bit_set(ecx, 18),
'nodeid_msr' : _is_bit_set(ecx, 19),
#'reserved' : _is_bit_set(ecx, 20),
'tbm' : _is_bit_set(ecx, 21),
'topoext' : _is_bit_set(ecx, 22),
'perfctr_core' : _is_bit_set(ecx, 23),
'perfctr_nb' : _is_bit_set(ecx, 24),
#'reserved' : _is_bit_set(ecx, 25),
'dbx' : _is_bit_set(ecx, 26),
'perftsc' : _is_bit_set(ecx, 27),
'pci_l2i' : _is_bit_set(ecx, 28),
#'reserved' : _is_bit_set(ecx, 29),
#'reserved' : _is_bit_set(ecx, 30),
#'reserved' : _is_bit_set(ecx, 31)
}
# Get a list of only the flags that are true
extended_flags = [k for k, v in extended_flags.items() if v]
flags += extended_flags
flags.sort()
return flags
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000002h.2C80000003h.2C80000004h:_Processor_Brand_String
def get_processor_brand(self, max_extension_support):
processor_brand = ""
# Processor brand string
if max_extension_support >= 0x80000004:
instructions = [
b"\xB8\x02\x00\x00\x80", # mov ax,0x80000002
b"\xB8\x03\x00\x00\x80", # mov ax,0x80000003
b"\xB8\x04\x00\x00\x80" # mov ax,0x80000004
]
for instruction in instructions:
# EAX
eax = self._run_asm(
instruction, # mov ax,0x8000000?
b"\x0f\xa2" # cpuid
b"\x89\xC0" # mov ax,ax
b"\xC3" # ret
)
# EBX
ebx = self._run_asm(
instruction, # mov ax,0x8000000?
b"\x0f\xa2" # cpuid
b"\x89\xD8" # mov ax,bx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
instruction, # mov ax,0x8000000?
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# EDX
edx = self._run_asm(
instruction, # mov ax,0x8000000?
b"\x0f\xa2" # cpuid
b"\x89\xD0" # mov ax,dx
b"\xC3" # ret
)
# Combine each of the 4 bytes in each register into the string
for reg in [eax, ebx, ecx, edx]:
for n in [0, 8, 16, 24]:
processor_brand += chr((reg >> n) & 0xFF)
# Strip off any trailing NULL terminators and white space
processor_brand = processor_brand.strip("\0").strip()
return processor_brand
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000006h:_Extended_L2_Cache_Features
def get_cache(self, max_extension_support):
cache_info = {}
# Just return if the cache feature is not supported
if max_extension_support < 0x80000006:
return cache_info
# ECX
ecx = self._run_asm(
b"\xB8\x06\x00\x00\x80" # mov ax,0x80000006
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
		# CPUID leaf 0x80000006, ECX layout: bits 31..16 = L2 size in KB,
		# bits 15..12 = associativity (encoded), bits 7..0 = line size in bytes
		cache_info = {
			'size_b' : ((ecx >> 16) & 0xFFFF) * 1024,
			'associativity' : (ecx >> 12) & 0xF,
			'line_size_b' : ecx & 0xFF
		}
return cache_info
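	# Illustrative result (values depend on the host CPU): a part with a 256 KB,
	# 8-way L2 and 64-byte cache lines would report roughly
	#   {'size_b': 262144, 'associativity': 6, 'line_size_b': 64}
	# where 'associativity' is the raw encoded nibble from CPUID, not a way count.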
def get_ticks_func(self):
retval = None
if DataSource.bits == '32bit':
# Works on x86_32
restype = None
argtypes = (ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint))
get_ticks_x86_32 = self._asm_func(restype, argtypes,
[
b"\x55", # push bp
b"\x89\xE5", # mov bp,sp
b"\x31\xC0", # xor ax,ax
b"\x0F\xA2", # cpuid
b"\x0F\x31", # rdtsc
b"\x8B\x5D\x08", # mov bx,[di+0x8]
b"\x8B\x4D\x0C", # mov cx,[di+0xc]
b"\x89\x13", # mov [bp+di],dx
b"\x89\x01", # mov [bx+di],ax
b"\x5D", # pop bp
b"\xC3" # ret
]
)
# Monkey patch func to combine high and low args into one return
old_func = get_ticks_x86_32.func
def new_func():
# Pass two uint32s into function
high = ctypes.c_uint32(0)
low = ctypes.c_uint32(0)
old_func(ctypes.byref(high), ctypes.byref(low))
# Shift the two uint32s into one uint64
retval = ((high.value << 32) & 0xFFFFFFFF00000000) | low.value
return retval
get_ticks_x86_32.func = new_func
retval = get_ticks_x86_32
elif DataSource.bits == '64bit':
# Works on x86_64
restype = ctypes.c_uint64
argtypes = ()
get_ticks_x86_64 = self._asm_func(restype, argtypes,
[
b"\x48", # dec ax
b"\x31\xC0", # xor ax,ax
b"\x0F\xA2", # cpuid
b"\x0F\x31", # rdtsc
b"\x48", # dec ax
b"\xC1\xE2\x20", # shl dx,byte 0x20
b"\x48", # dec ax
b"\x09\xD0", # or ax,dx
b"\xC3", # ret
]
)
retval = get_ticks_x86_64
return retval
def get_raw_hz(self):
from time import sleep
ticks_fn = self.get_ticks_func()
start = ticks_fn.func()
sleep(1)
end = ticks_fn.func()
ticks = (end - start)
ticks_fn.free()
return ticks
def _get_cpu_info_from_cpuid_actual():
'''
Warning! This function has the potential to crash the Python runtime.
Do not call it directly. Use the _get_cpu_info_from_cpuid function instead.
It will safely call this function in another process.
'''
if IS_PY2:
from cStringIO import StringIO
else:
from io import StringIO
trace = Trace(True, True)
info = {}
# Pipe stdout and stderr to strings
sys.stdout = trace._stdout
sys.stderr = trace._stderr
try:
# Get the CPU arch and bits
arch, bits = _parse_arch(DataSource.arch_string_raw)
# Return none if this is not an X86 CPU
if not arch in ['X86_32', 'X86_64']:
trace.fail('Not running on X86_32 or X86_64. Skipping ...')
return trace.to_dict(info, True)
# Return none if SE Linux is in enforcing mode
cpuid = CPUID(trace)
if cpuid.is_selinux_enforcing:
trace.fail('SELinux is enforcing. Skipping ...')
return trace.to_dict(info, True)
# Get the cpu info from the CPUID register
max_extension_support = cpuid.get_max_extension_support()
cache_info = cpuid.get_cache(max_extension_support)
info = cpuid.get_info()
processor_brand = cpuid.get_processor_brand(max_extension_support)
# Get the Hz and scale
hz_actual = cpuid.get_raw_hz()
hz_actual = _to_decimal_string(hz_actual)
# Get the Hz and scale
hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
info = {
'vendor_id_raw' : cpuid.get_vendor_id(),
'hardware_raw' : '',
'brand_raw' : processor_brand,
'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
'hz_actual' : _hz_short_to_full(hz_actual, 0),
'l2_cache_size' : cache_info['size_b'],
'l2_cache_line_size' : cache_info['line_size_b'],
'l2_cache_associativity' : cache_info['associativity'],
'stepping' : info['stepping'],
'model' : info['model'],
'family' : info['family'],
'processor_type' : info['processor_type'],
'flags' : cpuid.get_flags(max_extension_support)
}
info = _filter_dict_keys_with_empty_values(info)
trace.success()
except Exception as err:
from traceback import format_exc
err_string = format_exc()
trace._err = ''.join(['\t\t{0}\n'.format(n) for n in err_string.split('\n')]) + '\n'
return trace.to_dict(info, True)
return trace.to_dict(info, False)
def _get_cpu_info_from_cpuid_subprocess_wrapper(queue):
orig_stdout = sys.stdout
orig_stderr = sys.stderr
output = _get_cpu_info_from_cpuid_actual()
sys.stdout = orig_stdout
sys.stderr = orig_stderr
queue.put(_obj_to_b64(output))
def _get_cpu_info_from_cpuid():
'''
Returns the CPU info gathered by querying the X86 cpuid register in a new process.
Returns {} on non X86 cpus.
Returns {} if SELinux is in enforcing mode.
'''
	g_trace.header('Trying to get info from CPUID ...')
from multiprocessing import Process, Queue
# Return {} if can't cpuid
if not DataSource.can_cpuid:
g_trace.fail('Can\'t CPUID. Skipping ...')
return {}
# Get the CPU arch and bits
arch, bits = _parse_arch(DataSource.arch_string_raw)
# Return {} if this is not an X86 CPU
if not arch in ['X86_32', 'X86_64']:
g_trace.fail('Not running on X86_32 or X86_64. Skipping ...')
return {}
try:
if CAN_CALL_CPUID_IN_SUBPROCESS:
# Start running the function in a subprocess
queue = Queue()
p = Process(target=_get_cpu_info_from_cpuid_subprocess_wrapper, args=(queue,))
p.start()
# Wait for the process to end, while it is still alive
while p.is_alive():
p.join(0)
# Return {} if it failed
if p.exitcode != 0:
g_trace.fail('Failed to run CPUID in process. Skipping ...')
return {}
# Return {} if no results
if queue.empty():
g_trace.fail('Failed to get anything from CPUID process. Skipping ...')
return {}
# Return the result, only if there is something to read
else:
output = _b64_to_obj(queue.get())
import pprint
pp = pprint.PrettyPrinter(indent=4)
#pp.pprint(output)
if 'output' in output and output['output']:
g_trace.write(output['output'])
if 'stdout' in output and output['stdout']:
sys.stdout.write('{0}\n'.format(output['stdout']))
sys.stdout.flush()
if 'stderr' in output and output['stderr']:
sys.stderr.write('{0}\n'.format(output['stderr']))
sys.stderr.flush()
if 'is_fail' not in output:
g_trace.fail('Failed to get is_fail from CPUID process. Skipping ...')
return {}
# Fail if there was an exception
if 'err' in output and output['err']:
g_trace.fail('Failed to run CPUID in process. Skipping ...')
g_trace.write(output['err'])
g_trace.write('Failed ...')
return {}
if 'is_fail' in output and output['is_fail']:
g_trace.write('Failed ...')
return {}
if 'info' not in output or not output['info']:
g_trace.fail('Failed to get return info from CPUID process. Skipping ...')
return {}
return output['info']
else:
# FIXME: This should write the values like in the above call to actual
orig_stdout = sys.stdout
orig_stderr = sys.stderr
output = _get_cpu_info_from_cpuid_actual()
sys.stdout = orig_stdout
sys.stderr = orig_stderr
g_trace.success()
return output['info']
except Exception as err:
g_trace.fail(err)
pass
# Return {} if everything failed
return {}
def _get_cpu_info_from_proc_cpuinfo():
'''
Returns the CPU info gathered from /proc/cpuinfo.
Returns {} if /proc/cpuinfo is not found.
'''
	g_trace.header('Trying to get info from /proc/cpuinfo ...')
try:
# Just return {} if there is no cpuinfo
if not DataSource.has_proc_cpuinfo():
g_trace.fail('Failed to find /proc/cpuinfo. Skipping ...')
return {}
returncode, output = DataSource.cat_proc_cpuinfo()
if returncode != 0:
g_trace.fail('Failed to run cat /proc/cpuinfo. Skipping ...')
return {}
# Various fields
vendor_id = _get_field(False, output, None, '', 'vendor_id', 'vendor id', 'vendor')
processor_brand = _get_field(True, output, None, None, 'model name','cpu', 'processor')
cache_size = _get_field(False, output, None, '', 'cache size')
stepping = _get_field(False, output, int, 0, 'stepping')
model = _get_field(False, output, int, 0, 'model')
family = _get_field(False, output, int, 0, 'cpu family')
hardware = _get_field(False, output, None, '', 'Hardware')
# Flags
flags = _get_field(False, output, None, None, 'flags', 'Features', 'ASEs implemented')
if flags:
flags = flags.split()
flags.sort()
# Check for other cache format
if not cache_size:
try:
for i in range(0, 10):
name = "cache{0}".format(i)
value = _get_field(False, output, None, None, name)
if value:
value = [entry.split('=') for entry in value.split(' ')]
value = dict(value)
if 'level' in value and value['level'] == '3' and 'size' in value:
cache_size = value['size']
break
except Exception:
pass
# Convert from MHz string to Hz
hz_actual = _get_field(False, output, None, '', 'cpu MHz', 'cpu speed', 'clock', 'cpu MHz dynamic', 'cpu MHz static')
hz_actual = hz_actual.lower().rstrip('mhz').strip()
hz_actual = _to_decimal_string(hz_actual)
# Convert from GHz/MHz string to Hz
hz_advertised, scale = (None, 0)
try:
hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
except Exception:
pass
info = {
'hardware_raw' : hardware,
'brand_raw' : processor_brand,
'l3_cache_size' : _friendly_bytes_to_int(cache_size),
'flags' : flags,
'vendor_id_raw' : vendor_id,
'stepping' : stepping,
'model' : model,
'family' : family,
}
# Make the Hz the same for actual and advertised if missing any
if not hz_advertised or hz_advertised == '0.0':
hz_advertised = hz_actual
scale = 6
elif not hz_actual or hz_actual == '0.0':
hz_actual = hz_advertised
# Add the Hz if there is one
if _hz_short_to_full(hz_advertised, scale) > (0, 0):
info['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale)
info['hz_advertised'] = _hz_short_to_full(hz_advertised, scale)
if _hz_short_to_full(hz_actual, scale) > (0, 0):
info['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, 6)
info['hz_actual'] = _hz_short_to_full(hz_actual, 6)
info = _filter_dict_keys_with_empty_values(info)
g_trace.success()
return info
except Exception as err:
g_trace.fail(err)
#raise # NOTE: To have this throw on error, uncomment this line
return {}
def _get_cpu_info_from_cpufreq_info():
'''
Returns the CPU info gathered from cpufreq-info.
Returns {} if cpufreq-info is not found.
'''
	g_trace.header('Trying to get info from cpufreq-info ...')
try:
hz_brand, scale = '0.0', 0
if not DataSource.has_cpufreq_info():
g_trace.fail('Failed to find cpufreq-info. Skipping ...')
return {}
returncode, output = DataSource.cpufreq_info()
if returncode != 0:
g_trace.fail('Failed to run cpufreq-info. Skipping ...')
return {}
hz_brand = output.split('current CPU frequency is')[1].split('\n')[0]
i = hz_brand.find('Hz')
assert(i != -1)
hz_brand = hz_brand[0 : i+2].strip().lower()
if hz_brand.endswith('mhz'):
scale = 6
elif hz_brand.endswith('ghz'):
scale = 9
hz_brand = hz_brand.rstrip('mhz').rstrip('ghz').strip()
hz_brand = _to_decimal_string(hz_brand)
info = {
'hz_advertised_friendly' : _hz_short_to_friendly(hz_brand, scale),
'hz_actual_friendly' : _hz_short_to_friendly(hz_brand, scale),
'hz_advertised' : _hz_short_to_full(hz_brand, scale),
'hz_actual' : _hz_short_to_full(hz_brand, scale),
}
info = _filter_dict_keys_with_empty_values(info)
g_trace.success()
return info
except Exception as err:
g_trace.fail(err)
#raise # NOTE: To have this throw on error, uncomment this line
return {}
def _get_cpu_info_from_lscpu():
'''
Returns the CPU info gathered from lscpu.
Returns {} if lscpu is not found.
'''
	g_trace.header('Trying to get info from lscpu ...')
try:
if not DataSource.has_lscpu():
g_trace.fail('Failed to find lscpu. Skipping ...')
return {}
returncode, output = DataSource.lscpu()
if returncode != 0:
g_trace.fail('Failed to run lscpu. Skipping ...')
return {}
info = {}
new_hz = _get_field(False, output, None, None, 'CPU max MHz', 'CPU MHz')
if new_hz:
new_hz = _to_decimal_string(new_hz)
scale = 6
info['hz_advertised_friendly'] = _hz_short_to_friendly(new_hz, scale)
info['hz_actual_friendly'] = _hz_short_to_friendly(new_hz, scale)
info['hz_advertised'] = _hz_short_to_full(new_hz, scale)
info['hz_actual'] = _hz_short_to_full(new_hz, scale)
new_hz = _get_field(False, output, None, None, 'CPU dynamic MHz', 'CPU static MHz')
if new_hz:
new_hz = _to_decimal_string(new_hz)
scale = 6
info['hz_advertised_friendly'] = _hz_short_to_friendly(new_hz, scale)
info['hz_actual_friendly'] = _hz_short_to_friendly(new_hz, scale)
info['hz_advertised'] = _hz_short_to_full(new_hz, scale)
info['hz_actual'] = _hz_short_to_full(new_hz, scale)
vendor_id = _get_field(False, output, None, None, 'Vendor ID')
if vendor_id:
info['vendor_id_raw'] = vendor_id
brand = _get_field(False, output, None, None, 'Model name')
if brand:
info['brand_raw'] = brand
else:
brand = _get_field(False, output, None, None, 'Model')
if brand and not brand.isdigit():
info['brand_raw'] = brand
family = _get_field(False, output, None, None, 'CPU family')
if family and family.isdigit():
info['family'] = int(family)
stepping = _get_field(False, output, None, None, 'Stepping')
if stepping and stepping.isdigit():
info['stepping'] = int(stepping)
model = _get_field(False, output, None, None, 'Model')
if model and model.isdigit():
info['model'] = int(model)
l1_data_cache_size = _get_field(False, output, None, None, 'L1d cache')
if l1_data_cache_size:
info['l1_data_cache_size'] = _friendly_bytes_to_int(l1_data_cache_size)
l1_instruction_cache_size = _get_field(False, output, None, None, 'L1i cache')
if l1_instruction_cache_size:
info['l1_instruction_cache_size'] = _friendly_bytes_to_int(l1_instruction_cache_size)
l2_cache_size = _get_field(False, output, None, None, 'L2 cache', 'L2d cache')
if l2_cache_size:
info['l2_cache_size'] = _friendly_bytes_to_int(l2_cache_size)
l3_cache_size = _get_field(False, output, None, None, 'L3 cache')
if l3_cache_size:
info['l3_cache_size'] = _friendly_bytes_to_int(l3_cache_size)
# Flags
flags = _get_field(False, output, None, None, 'flags', 'Features', 'ASEs implemented')
if flags:
flags = flags.split()
flags.sort()
info['flags'] = flags
info = _filter_dict_keys_with_empty_values(info)
g_trace.success()
return info
except Exception as err:
g_trace.fail(err)
#raise # NOTE: To have this throw on error, uncomment this line
return {}
def _get_cpu_info_from_dmesg():
'''
Returns the CPU info gathered from dmesg.
Returns {} if dmesg is not found or does not have the desired info.
'''
	g_trace.header('Trying to get info from dmesg ...')
# Just return {} if this arch has an unreliable dmesg log
arch, bits = _parse_arch(DataSource.arch_string_raw)
if arch in ['S390X']:
g_trace.fail('Running on S390X. Skipping ...')
return {}
# Just return {} if there is no dmesg
if not DataSource.has_dmesg():
g_trace.fail('Failed to find dmesg. Skipping ...')
return {}
# If dmesg fails return {}
returncode, output = DataSource.dmesg_a()
if output == None or returncode != 0:
g_trace.fail('Failed to run \"dmesg -a\". Skipping ...')
return {}
info = _parse_dmesg_output(output)
g_trace.success()
return info
# https://openpowerfoundation.org/wp-content/uploads/2016/05/LoPAPR_DRAFT_v11_24March2016_cmt1.pdf
# page 767
def _get_cpu_info_from_ibm_pa_features():
'''
Returns the CPU info gathered from lsprop /proc/device-tree/cpus/*/ibm,pa-features
Returns {} if lsprop is not found or ibm,pa-features does not have the desired info.
'''
	g_trace.header('Trying to get info from lsprop ...')
try:
# Just return {} if there is no lsprop
if not DataSource.has_ibm_pa_features():
g_trace.fail('Failed to find lsprop. Skipping ...')
return {}
# If ibm,pa-features fails return {}
returncode, output = DataSource.ibm_pa_features()
if output == None or returncode != 0:
g_trace.fail('Failed to glob /proc/device-tree/cpus/*/ibm,pa-features. Skipping ...')
return {}
# Filter out invalid characters from output
value = output.split("ibm,pa-features")[1].lower()
		value = [s for s in value if s in list('0123456789abcdef')]
value = ''.join(value)
# Get data converted to Uint32 chunks
left = int(value[0 : 8], 16)
right = int(value[8 : 16], 16)
# Get the CPU flags
flags = {
# Byte 0
'mmu' : _is_bit_set(left, 0),
'fpu' : _is_bit_set(left, 1),
'slb' : _is_bit_set(left, 2),
'run' : _is_bit_set(left, 3),
#'reserved' : _is_bit_set(left, 4),
'dabr' : _is_bit_set(left, 5),
'ne' : _is_bit_set(left, 6),
'wtr' : _is_bit_set(left, 7),
# Byte 1
'mcr' : _is_bit_set(left, 8),
'dsisr' : _is_bit_set(left, 9),
'lp' : _is_bit_set(left, 10),
'ri' : _is_bit_set(left, 11),
'dabrx' : _is_bit_set(left, 12),
'sprg3' : _is_bit_set(left, 13),
'rislb' : _is_bit_set(left, 14),
'pp' : _is_bit_set(left, 15),
# Byte 2
'vpm' : _is_bit_set(left, 16),
'dss_2.05' : _is_bit_set(left, 17),
#'reserved' : _is_bit_set(left, 18),
'dar' : _is_bit_set(left, 19),
#'reserved' : _is_bit_set(left, 20),
'ppr' : _is_bit_set(left, 21),
'dss_2.02' : _is_bit_set(left, 22),
'dss_2.06' : _is_bit_set(left, 23),
# Byte 3
'lsd_in_dscr' : _is_bit_set(left, 24),
'ugr_in_dscr' : _is_bit_set(left, 25),
#'reserved' : _is_bit_set(left, 26),
#'reserved' : _is_bit_set(left, 27),
#'reserved' : _is_bit_set(left, 28),
#'reserved' : _is_bit_set(left, 29),
#'reserved' : _is_bit_set(left, 30),
#'reserved' : _is_bit_set(left, 31),
# Byte 4
'sso_2.06' : _is_bit_set(right, 0),
#'reserved' : _is_bit_set(right, 1),
#'reserved' : _is_bit_set(right, 2),
#'reserved' : _is_bit_set(right, 3),
#'reserved' : _is_bit_set(right, 4),
#'reserved' : _is_bit_set(right, 5),
#'reserved' : _is_bit_set(right, 6),
#'reserved' : _is_bit_set(right, 7),
# Byte 5
'le' : _is_bit_set(right, 8),
'cfar' : _is_bit_set(right, 9),
'eb' : _is_bit_set(right, 10),
'lsq_2.07' : _is_bit_set(right, 11),
#'reserved' : _is_bit_set(right, 12),
#'reserved' : _is_bit_set(right, 13),
#'reserved' : _is_bit_set(right, 14),
#'reserved' : _is_bit_set(right, 15),
# Byte 6
'dss_2.07' : _is_bit_set(right, 16),
#'reserved' : _is_bit_set(right, 17),
#'reserved' : _is_bit_set(right, 18),
#'reserved' : _is_bit_set(right, 19),
#'reserved' : _is_bit_set(right, 20),
#'reserved' : _is_bit_set(right, 21),
#'reserved' : _is_bit_set(right, 22),
#'reserved' : _is_bit_set(right, 23),
# Byte 7
#'reserved' : _is_bit_set(right, 24),
#'reserved' : _is_bit_set(right, 25),
#'reserved' : _is_bit_set(right, 26),
#'reserved' : _is_bit_set(right, 27),
#'reserved' : _is_bit_set(right, 28),
#'reserved' : _is_bit_set(right, 29),
#'reserved' : _is_bit_set(right, 30),
#'reserved' : _is_bit_set(right, 31),
}
# Get a list of only the flags that are true
flags = [k for k, v in flags.items() if v]
flags.sort()
info = {
'flags' : flags
}
info = _filter_dict_keys_with_empty_values(info)
g_trace.success()
return info
except Exception as err:
g_trace.fail(err)
return {}
def _get_cpu_info_from_cat_var_run_dmesg_boot():
'''
Returns the CPU info gathered from /var/run/dmesg.boot.
Returns {} if dmesg is not found or does not have the desired info.
'''
	g_trace.header('Trying to get info from the /var/run/dmesg.boot log ...')
# Just return {} if there is no /var/run/dmesg.boot
if not DataSource.has_var_run_dmesg_boot():
g_trace.fail('Failed to find /var/run/dmesg.boot file. Skipping ...')
return {}
# If dmesg.boot fails return {}
returncode, output = DataSource.cat_var_run_dmesg_boot()
if output == None or returncode != 0:
g_trace.fail('Failed to run \"cat /var/run/dmesg.boot\". Skipping ...')
return {}
info = _parse_dmesg_output(output)
g_trace.success()
return info
def _get_cpu_info_from_sysctl():
'''
Returns the CPU info gathered from sysctl.
Returns {} if sysctl is not found.
'''
	g_trace.header('Trying to get info from sysctl ...')
try:
# Just return {} if there is no sysctl
if not DataSource.has_sysctl():
g_trace.fail('Failed to find sysctl. Skipping ...')
return {}
# If sysctl fails return {}
returncode, output = DataSource.sysctl_machdep_cpu_hw_cpufrequency()
if output == None or returncode != 0:
g_trace.fail('Failed to run \"sysctl machdep.cpu hw.cpufrequency\". Skipping ...')
return {}
# Various fields
vendor_id = _get_field(False, output, None, None, 'machdep.cpu.vendor')
processor_brand = _get_field(True, output, None, None, 'machdep.cpu.brand_string')
cache_size = _get_field(False, output, int, 0, 'machdep.cpu.cache.size')
stepping = _get_field(False, output, int, 0, 'machdep.cpu.stepping')
model = _get_field(False, output, int, 0, 'machdep.cpu.model')
family = _get_field(False, output, int, 0, 'machdep.cpu.family')
# Flags
flags = _get_field(False, output, None, '', 'machdep.cpu.features').lower().split()
flags.extend(_get_field(False, output, None, '', 'machdep.cpu.leaf7_features').lower().split())
flags.extend(_get_field(False, output, None, '', 'machdep.cpu.extfeatures').lower().split())
flags.sort()
# Convert from GHz/MHz string to Hz
hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
hz_actual = _get_field(False, output, None, None, 'hw.cpufrequency')
hz_actual = _to_decimal_string(hz_actual)
info = {
'vendor_id_raw' : vendor_id,
'brand_raw' : processor_brand,
'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
'hz_actual' : _hz_short_to_full(hz_actual, 0),
'l2_cache_size' : int(cache_size) * 1024,
'stepping' : stepping,
'model' : model,
'family' : family,
'flags' : flags
}
info = _filter_dict_keys_with_empty_values(info)
g_trace.success()
return info
except Exception as err:
g_trace.fail(err)
return {}
def _get_cpu_info_from_sysinfo():
'''
Returns the CPU info gathered from sysinfo.
Returns {} if sysinfo is not found.
'''
info = _get_cpu_info_from_sysinfo_v1()
info.update(_get_cpu_info_from_sysinfo_v2())
return info
def _get_cpu_info_from_sysinfo_v1():
'''
Returns the CPU info gathered from sysinfo.
Returns {} if sysinfo is not found.
'''
	g_trace.header('Trying to get info from sysinfo version 1 ...')
try:
# Just return {} if there is no sysinfo
if not DataSource.has_sysinfo():
g_trace.fail('Failed to find sysinfo. Skipping ...')
return {}
# If sysinfo fails return {}
returncode, output = DataSource.sysinfo_cpu()
if output == None or returncode != 0:
g_trace.fail('Failed to run \"sysinfo -cpu\". Skipping ...')
return {}
# Various fields
vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip()
cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
stepping = int(output.split(', stepping ')[1].split(',')[0].strip())
model = int(output.split(', model ')[1].split(',')[0].strip())
family = int(output.split(', family ')[1].split(',')[0].strip())
# Flags
flags = []
for line in output.split('\n'):
if line.startswith('\t\t'):
for flag in line.strip().lower().split():
flags.append(flag)
flags.sort()
# Convert from GHz/MHz string to Hz
hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
hz_actual = hz_advertised
info = {
'vendor_id_raw' : vendor_id,
'brand_raw' : processor_brand,
'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale),
'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
'hz_actual' : _hz_short_to_full(hz_actual, scale),
'l2_cache_size' : _to_friendly_bytes(cache_size),
'stepping' : stepping,
'model' : model,
'family' : family,
'flags' : flags
}
info = _filter_dict_keys_with_empty_values(info)
g_trace.success()
return info
except Exception as err:
g_trace.fail(err)
#raise # NOTE: To have this throw on error, uncomment this line
return {}
def _get_cpu_info_from_sysinfo_v2():
'''
Returns the CPU info gathered from sysinfo.
Returns {} if sysinfo is not found.
'''
	g_trace.header('Trying to get info from sysinfo version 2 ...')
try:
# Just return {} if there is no sysinfo
if not DataSource.has_sysinfo():
g_trace.fail('Failed to find sysinfo. Skipping ...')
return {}
# If sysinfo fails return {}
returncode, output = DataSource.sysinfo_cpu()
if output == None or returncode != 0:
g_trace.fail('Failed to run \"sysinfo -cpu\". Skipping ...')
return {}
# Various fields
vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip()
cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
signature = output.split('Signature:')[1].split('\n')[0].strip()
#
stepping = int(signature.split('stepping ')[1].split(',')[0].strip())
model = int(signature.split('model ')[1].split(',')[0].strip())
family = int(signature.split('family ')[1].split(',')[0].strip())
# Flags
def get_subsection_flags(output):
retval = []
for line in output.split('\n')[1:]:
if not line.startswith(' ') and not line.startswith(' '): break
for entry in line.strip().lower().split(' '):
retval.append(entry)
return retval
flags = get_subsection_flags(output.split('Features: ')[1]) + \
get_subsection_flags(output.split('Extended Features (0x00000001): ')[1]) + \
get_subsection_flags(output.split('Extended Features (0x80000001): ')[1])
flags.sort()
# Convert from GHz/MHz string to Hz
lines = [n for n in output.split('\n') if n]
raw_hz = lines[0].split('running at ')[1].strip().lower()
hz_advertised = raw_hz.rstrip('mhz').rstrip('ghz').strip()
hz_advertised = _to_decimal_string(hz_advertised)
hz_actual = hz_advertised
scale = 0
if raw_hz.endswith('mhz'):
scale = 6
elif raw_hz.endswith('ghz'):
scale = 9
info = {
'vendor_id_raw' : vendor_id,
'brand_raw' : processor_brand,
'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale),
'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
'hz_actual' : _hz_short_to_full(hz_actual, scale),
'l2_cache_size' : _to_friendly_bytes(cache_size),
'stepping' : stepping,
'model' : model,
'family' : family,
'flags' : flags
}
info = _filter_dict_keys_with_empty_values(info)
g_trace.success()
return info
except Exception as err:
g_trace.fail(err)
#raise # NOTE: To have this throw on error, uncomment this line
return {}
def _get_cpu_info_from_wmic():
'''
Returns the CPU info gathered from WMI.
Returns {} if not on Windows, or wmic is not installed.
'''
	g_trace.header('Trying to get info from wmic ...')
try:
# Just return {} if not Windows or there is no wmic
if not DataSource.is_windows or not DataSource.has_wmic():
g_trace.fail('Failed to find WMIC, or not on Windows. Skipping ...')
return {}
returncode, output = DataSource.wmic_cpu()
if output == None or returncode != 0:
g_trace.fail('Failed to run wmic. Skipping ...')
return {}
# Break the list into key values pairs
value = output.split("\n")
value = [s.rstrip().split('=') for s in value if '=' in s]
value = {k: v for k, v in value if v}
# Get the advertised MHz
processor_brand = value.get('Name')
hz_advertised, scale_advertised = _parse_cpu_brand_string(processor_brand)
# Get the actual MHz
hz_actual = value.get('CurrentClockSpeed')
scale_actual = 6
if hz_actual:
hz_actual = _to_decimal_string(hz_actual)
# Get cache sizes
l2_cache_size = value.get('L2CacheSize') # NOTE: L2CacheSize is in kilobytes
if l2_cache_size:
l2_cache_size = int(l2_cache_size) * 1024
l3_cache_size = value.get('L3CacheSize') # NOTE: L3CacheSize is in kilobytes
if l3_cache_size:
l3_cache_size = int(l3_cache_size) * 1024
# Get family, model, and stepping
family, model, stepping = '', '', ''
description = value.get('Description') or value.get('Caption')
entries = description.split(' ')
if 'Family' in entries and entries.index('Family') < len(entries)-1:
i = entries.index('Family')
family = int(entries[i + 1])
if 'Model' in entries and entries.index('Model') < len(entries)-1:
i = entries.index('Model')
model = int(entries[i + 1])
if 'Stepping' in entries and entries.index('Stepping') < len(entries)-1:
i = entries.index('Stepping')
stepping = int(entries[i + 1])
info = {
'vendor_id_raw' : value.get('Manufacturer'),
'brand_raw' : processor_brand,
'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale_advertised),
'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale_actual),
'hz_advertised' : _hz_short_to_full(hz_advertised, scale_advertised),
'hz_actual' : _hz_short_to_full(hz_actual, scale_actual),
'l2_cache_size' : l2_cache_size,
'l3_cache_size' : l3_cache_size,
'stepping' : stepping,
'model' : model,
'family' : family,
}
info = _filter_dict_keys_with_empty_values(info)
g_trace.success()
return info
except Exception as err:
g_trace.fail(err)
#raise # NOTE: To have this throw on error, uncomment this line
return {}
def _get_cpu_info_from_registry():
'''
Returns the CPU info gathered from the Windows Registry.
Returns {} if not on Windows.
'''
g_trace.header('Tying to get info from Windows registry ...')
try:
# Just return {} if not on Windows
if not DataSource.is_windows:
g_trace.fail('Not running on Windows. Skipping ...')
return {}
# Get the CPU name
processor_brand = DataSource.winreg_processor_brand().strip()
# Get the CPU vendor id
vendor_id = DataSource.winreg_vendor_id_raw()
# Get the CPU arch and bits
arch_string_raw = DataSource.winreg_arch_string_raw()
arch, bits = _parse_arch(arch_string_raw)
# Get the actual CPU Hz
hz_actual = DataSource.winreg_hz_actual()
hz_actual = _to_decimal_string(hz_actual)
# Get the advertised CPU Hz
hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
# If advertised hz not found, use the actual hz
if hz_advertised == '0.0':
scale = 6
hz_advertised = _to_decimal_string(hz_actual)
# Get the CPU features
feature_bits = DataSource.winreg_feature_bits()
def is_set(bit):
mask = 0x80000000 >> bit
retval = mask & feature_bits > 0
return retval
# http://en.wikipedia.org/wiki/CPUID
# http://unix.stackexchange.com/questions/43539/what-do-the-flags-in-proc-cpuinfo-mean
# http://www.lohninger.com/helpcsuite/public_constants_cpuid.htm
flags = {
'fpu' : is_set(0), # Floating Point Unit
'vme' : is_set(1), # V86 Mode Extensions
'de' : is_set(2), # Debug Extensions - I/O breakpoints supported
'pse' : is_set(3), # Page Size Extensions (4 MB pages supported)
'tsc' : is_set(4), # Time Stamp Counter and RDTSC instruction are available
'msr' : is_set(5), # Model Specific Registers
'pae' : is_set(6), # Physical Address Extensions (36 bit address, 2MB pages)
'mce' : is_set(7), # Machine Check Exception supported
'cx8' : is_set(8), # Compare Exchange Eight Byte instruction available
'apic' : is_set(9), # Local APIC present (multiprocessor operation support)
'sepamd' : is_set(10), # Fast system calls (AMD only)
'sep' : is_set(11), # Fast system calls
'mtrr' : is_set(12), # Memory Type Range Registers
'pge' : is_set(13), # Page Global Enable
'mca' : is_set(14), # Machine Check Architecture
'cmov' : is_set(15), # Conditional MOVe instructions
'pat' : is_set(16), # Page Attribute Table
'pse36' : is_set(17), # 36 bit Page Size Extensions
'serial' : is_set(18), # Processor Serial Number
'clflush' : is_set(19), # Cache Flush
#'reserved1' : is_set(20), # reserved
'dts' : is_set(21), # Debug Trace Store
'acpi' : is_set(22), # ACPI support
'mmx' : is_set(23), # MultiMedia Extensions
'fxsr' : is_set(24), # FXSAVE and FXRSTOR instructions
'sse' : is_set(25), # SSE instructions
'sse2' : is_set(26), # SSE2 (WNI) instructions
'ss' : is_set(27), # self snoop
#'reserved2' : is_set(28), # reserved
'tm' : is_set(29), # Automatic clock control
'ia64' : is_set(30), # IA64 instructions
'3dnow' : is_set(31) # 3DNow! instructions available
}
# Get a list of only the flags that are true
flags = [k for k, v in flags.items() if v]
flags.sort()
info = {
'vendor_id_raw' : vendor_id,
'brand_raw' : processor_brand,
'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 6),
'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
'hz_actual' : _hz_short_to_full(hz_actual, 6),
'flags' : flags
}
info = _filter_dict_keys_with_empty_values(info)
g_trace.success()
return info
except Exception as err:
g_trace.fail(err)
return {}
def _get_cpu_info_from_kstat():
'''
Returns the CPU info gathered from isainfo and kstat.
Returns {} if isainfo or kstat are not found.
'''
g_trace.header('Tying to get info from kstat ...')
try:
# Just return {} if there is no isainfo or kstat
if not DataSource.has_isainfo() or not DataSource.has_kstat():
g_trace.fail('Failed to find isinfo or kstat. Skipping ...')
return {}
# If isainfo fails return {}
returncode, flag_output = DataSource.isainfo_vb()
if flag_output == None or returncode != 0:
g_trace.fail('Failed to run \"isainfo -vb\". Skipping ...')
return {}
# If kstat fails return {}
returncode, kstat = DataSource.kstat_m_cpu_info()
if kstat == None or returncode != 0:
g_trace.fail('Failed to run \"kstat -m cpu_info\". Skipping ...')
return {}
# Various fields
vendor_id = kstat.split('\tvendor_id ')[1].split('\n')[0].strip()
processor_brand = kstat.split('\tbrand ')[1].split('\n')[0].strip()
stepping = int(kstat.split('\tstepping ')[1].split('\n')[0].strip())
model = int(kstat.split('\tmodel ')[1].split('\n')[0].strip())
family = int(kstat.split('\tfamily ')[1].split('\n')[0].strip())
# Flags
flags = flag_output.strip().split('\n')[-1].strip().lower().split()
flags.sort()
# Convert from GHz/MHz string to Hz
scale = 6
hz_advertised = kstat.split('\tclock_MHz ')[1].split('\n')[0].strip()
hz_advertised = _to_decimal_string(hz_advertised)
# Convert from GHz/MHz string to Hz
hz_actual = kstat.split('\tcurrent_clock_Hz ')[1].split('\n')[0].strip()
hz_actual = _to_decimal_string(hz_actual)
info = {
'vendor_id_raw' : vendor_id,
'brand_raw' : processor_brand,
'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
'hz_actual' : _hz_short_to_full(hz_actual, 0),
'stepping' : stepping,
'model' : model,
'family' : family,
'flags' : flags
}
info = _filter_dict_keys_with_empty_values(info)
g_trace.success()
return info
except Exception as err:
g_trace.fail(err)
return {}
def _get_cpu_info_from_platform_uname():
g_trace.header('Tying to get info from platform.uname ...')
try:
uname = DataSource.uname_string_raw.split(',')[0]
family, model, stepping = (None, None, None)
entries = uname.split(' ')
if 'Family' in entries and entries.index('Family') < len(entries)-1:
i = entries.index('Family')
family = int(entries[i + 1])
if 'Model' in entries and entries.index('Model') < len(entries)-1:
i = entries.index('Model')
model = int(entries[i + 1])
if 'Stepping' in entries and entries.index('Stepping') < len(entries)-1:
i = entries.index('Stepping')
stepping = int(entries[i + 1])
info = {
'family' : family,
'model' : model,
'stepping' : stepping
}
info = _filter_dict_keys_with_empty_values(info)
g_trace.success()
return info
except Exception as err:
g_trace.fail(err)
return {}
def _get_cpu_info_internal():
'''
Returns the CPU info by using the best sources of information for your OS.
Returns {} if nothing is found.
'''
g_trace.write('!' * 80)
# Get the CPU arch and bits
arch, bits = _parse_arch(DataSource.arch_string_raw)
friendly_maxsize = { 2**31-1: '32 bit', 2**63-1: '64 bit' }.get(sys.maxsize) or 'unknown bits'
friendly_version = "{0}.{1}.{2}.{3}.{4}".format(*sys.version_info)
PYTHON_VERSION = "{0} ({1})".format(friendly_version, friendly_maxsize)
info = {
'python_version' : PYTHON_VERSION,
'cpuinfo_version' : CPUINFO_VERSION,
'cpuinfo_version_string' : CPUINFO_VERSION_STRING,
'arch' : arch,
'bits' : bits,
'count' : DataSource.cpu_count,
'arch_string_raw' : DataSource.arch_string_raw,
}
g_trace.write("python_version: {0}".format(info['python_version']))
g_trace.write("cpuinfo_version: {0}".format(info['cpuinfo_version']))
g_trace.write("arch: {0}".format(info['arch']))
g_trace.write("bits: {0}".format(info['bits']))
g_trace.write("count: {0}".format(info['count']))
g_trace.write("arch_string_raw: {0}".format(info['arch_string_raw']))
# Try the Windows wmic
_copy_new_fields(info, _get_cpu_info_from_wmic())
# Try the Windows registry
_copy_new_fields(info, _get_cpu_info_from_registry())
# Try /proc/cpuinfo
_copy_new_fields(info, _get_cpu_info_from_proc_cpuinfo())
# Try cpufreq-info
_copy_new_fields(info, _get_cpu_info_from_cpufreq_info())
# Try LSCPU
_copy_new_fields(info, _get_cpu_info_from_lscpu())
# Try sysctl
_copy_new_fields(info, _get_cpu_info_from_sysctl())
# Try kstat
_copy_new_fields(info, _get_cpu_info_from_kstat())
# Try dmesg
_copy_new_fields(info, _get_cpu_info_from_dmesg())
# Try /var/run/dmesg.boot
_copy_new_fields(info, _get_cpu_info_from_cat_var_run_dmesg_boot())
# Try lsprop ibm,pa-features
_copy_new_fields(info, _get_cpu_info_from_ibm_pa_features())
# Try sysinfo
_copy_new_fields(info, _get_cpu_info_from_sysinfo())
# Try querying the CPU cpuid register
# FIXME: This should print stdout and stderr to trace log
_copy_new_fields(info, _get_cpu_info_from_cpuid())
# Try platform.uname
_copy_new_fields(info, _get_cpu_info_from_platform_uname())
g_trace.write('!' * 80)
return info
def get_cpu_info_json():
'''
Returns the CPU info by using the best sources of information for your OS.
Returns the result in a json string
'''
import json
output = None
# If running under pyinstaller, run normally
if getattr(sys, 'frozen', False):
info = _get_cpu_info_internal()
output = json.dumps(info)
output = "{0}".format(output)
# If not running under pyinstaller, run in another process.
# This is done because multiprocessing has a design flaw that
# causes non-main programs to run multiple times on Windows.
else:
from subprocess import Popen, PIPE
command = [sys.executable, __file__, '--json']
p1 = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
output = p1.communicate()[0]
if p1.returncode != 0:
return "{}"
if not IS_PY2:
output = output.decode(encoding='UTF-8')
return output
def get_cpu_info():
'''
Returns the CPU info by using the best sources of information for your OS.
Returns the result in a dict
'''
import json
output = get_cpu_info_json()
# Convert JSON to Python with non unicode strings
output = json.loads(output, object_hook = _utf_to_str)
return output
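# Illustrative usage sketch (not part of the original module; it assumes the module is
# importable as `cpuinfo`, which is how py-cpuinfo is normally packaged). Only keys that
# appear elsewhere in this file are used, and any key may be missing on a given platform,
# hence .get():
#
#     from cpuinfo import get_cpu_info
#     info = get_cpu_info()
#     print(info.get('brand_raw'), info.get('hz_advertised_friendly'), info.get('arch'))
#
# get_cpu_info() returns a dict, while get_cpu_info_json() returns the same data as a JSON string.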
def main():
from argparse import ArgumentParser
import json
# Parse args
parser = ArgumentParser(description='Gets CPU info with pure Python 2 & 3')
parser.add_argument('--json', action='store_true', help='Return the info in JSON format')
parser.add_argument('--version', action='store_true', help='Return the version of py-cpuinfo')
parser.add_argument('--trace', action='store_true', help='Traces code paths used to find CPU info to file')
args = parser.parse_args()
global g_trace
g_trace = Trace(args.trace, False)
try:
_check_arch()
except Exception as err:
sys.stderr.write(str(err) + "\n")
sys.exit(1)
info = _get_cpu_info_internal()
if not info:
sys.stderr.write("Failed to find cpu info\n")
sys.exit(1)
if args.json:
print(json.dumps(info))
elif args.version:
print(CPUINFO_VERSION_STRING)
else:
print('Python Version: {0}'.format(info.get('python_version', '')))
print('Cpuinfo Version: {0}'.format(info.get('cpuinfo_version_string', '')))
print('Vendor ID Raw: {0}'.format(info.get('vendor_id_raw', '')))
print('Hardware Raw: {0}'.format(info.get('hardware_raw', '')))
print('Brand Raw: {0}'.format(info.get('brand_raw', '')))
print('Hz Advertised Friendly: {0}'.format(info.get('hz_advertised_friendly', '')))
print('Hz Actual Friendly: {0}'.format(info.get('hz_actual_friendly', '')))
print('Hz Advertised: {0}'.format(info.get('hz_advertised', '')))
print('Hz Actual: {0}'.format(info.get('hz_actual', '')))
print('Arch: {0}'.format(info.get('arch', '')))
print('Bits: {0}'.format(info.get('bits', '')))
print('Count: {0}'.format(info.get('count', '')))
print('Arch String Raw: {0}'.format(info.get('arch_string_raw', '')))
print('L1 Data Cache Size: {0}'.format(info.get('l1_data_cache_size', '')))
print('L1 Instruction Cache Size: {0}'.format(info.get('l1_instruction_cache_size', '')))
print('L2 Cache Size: {0}'.format(info.get('l2_cache_size', '')))
print('L2 Cache Line Size: {0}'.format(info.get('l2_cache_line_size', '')))
print('L2 Cache Associativity: {0}'.format(info.get('l2_cache_associativity', '')))
print('L3 Cache Size: {0}'.format(info.get('l3_cache_size', '')))
print('Stepping: {0}'.format(info.get('stepping', '')))
print('Model: {0}'.format(info.get('model', '')))
print('Family: {0}'.format(info.get('family', '')))
print('Processor Type: {0}'.format(info.get('processor_type', '')))
print('Flags: {0}'.format(', '.join(info.get('flags', ''))))
if __name__ == '__main__':
main()
else:
g_trace = Trace(False, False)
_check_arch()
|
recipe-475217.py
|
from os.path import basename
from Queue import Queue
from random import random, seed
from sys import argv, exit
from threading import Thread
from time import sleep
################################################################################
class Widget:
pass
class Stack:
def __init__(self):
self.__stack = list()
def __len__(self):
return len(self.__stack)
def push(self, item):
self.__stack.append(item)
def pop(self):
return self.__stack.pop()
################################################################################
def main():
parse_argv()
run_flag, buffer_queue, producer_stack, consumer_stack, print_queue = [True], Queue(argv[1]), Stack(), Stack(), Queue()
producer_thread = Thread(target=producer, args=(run_flag, argv[3], buffer_queue, producer_stack, print_queue))
consumer_thread = Thread(target=consumer, args=(run_flag, producer_thread, buffer_queue, consumer_stack, argv[4], print_queue))
printer_thread = Thread(target=printer, args=(run_flag, consumer_thread, print_queue))
producer_thread.start()
consumer_thread.start()
printer_thread.start()
sleep(argv[2])
run_flag[0] = False
printer_thread.join()
check_results(producer_stack , consumer_stack)
def parse_argv():
try:
assert len(argv) > 4
argv[1] = abs(int(argv[1]))
argv[2] = abs(float(argv[2]))
assert argv[1] and argv[2]
argv[3] = abs(float(argv[3]))
argv[4] = abs(float(argv[4]))
if len(argv) > 5:
seed(convert(' '.join(argv[5:])))
except:
print basename(argv[0]), '<buff_size> <main_time> <prod_time> <cons_time> [<seed>]'
exit(1)
def convert(string):
number = 1
for character in string:
number <<= 8
number += ord(character)
return number
def check_results(producer_stack , consumer_stack):
print 'Solution has',
try:
assert len(producer_stack) == len(consumer_stack)
while producer_stack:
assert producer_stack.pop() is consumer_stack.pop()
print 'passed.'
except:
print 'failed.'
################################################################################
def producer(run_flag, max_time, buffer_queue, producer_stack, print_queue):
while run_flag[0]:
sleep(random() * max_time)
widget = Widget()
buffer_queue.put(widget)
producer_stack.push(widget)
print_queue.put('Producer: %s Widget' % id(widget))
def consumer(run_flag, producer_thread, buffer_queue, consumer_stack, max_time, print_queue):
while run_flag[0] or producer_thread.isAlive() or not buffer_queue.empty():
widget = buffer_queue.get()
consumer_stack.push(widget)
sleep(random() * max_time)
print_queue.put('Consumer: %s Widget' % id(widget))
def printer(run_flag, consumer_thread, print_queue):
while run_flag[0] or consumer_thread.isAlive() or not print_queue.empty():
if print_queue.empty():
sleep(0.1)
else:
print print_queue.get()
################################################################################
if __name__ == '__main__':
main()
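# Illustrative invocation (an assumption based on parse_argv above, which expects
# <buff_size> <main_time> <prod_time> <cons_time> plus an optional seed string), run with a
# Python 2 interpreter since the script uses the Queue module and print statements:
#
#     python recipe-475217.py 10 5.0 0.5 0.5 "some seed"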
|
rest_generic_adapter.py
|
# -*- coding: utf-8 -*-
"""Module for starting an adapters that read data from REST APIs."""
# Copyright (c) TUT Tampere University of Technology 2015-2018.
# This software has been developed in Procem-project funded by Business Finland.
# This code is licensed under the MIT license.
# See the LICENSE.txt in the project root for the license terms.
#
# Main author(s): Ville Heikkila, Otto Hylli, Pekka Itavuo,
# Teemu Laukkarinen ja Ulla-Talvikki Virta
import importlib
import queue
import sys
import threading
import time
try:
import adapters.common_utils as common_utils
except:
# used when running the module directly
import common_utils
DEFAULT_CONFIG_SCHEME = "rest_api_configuration.json"
def generic_website_worker(worker, website, website_queue):
worker_object = worker(website, website_queue)
name = website.get("name", "Unknown")
verbose_limit = website.get("verbose", 0)
success_count = 0
while True:
wait_time = worker_object.getWaitingTime()
# print(common_utils.getTimeString(), name, "worker going to sleep for", round(wait_time, 1), "seconds.")
time.sleep(wait_time)
success = worker_object.getData()
if success:
success_count += 1
if 0 < verbose_limit <= success_count:
print(common_utils.getTimeString(), " Data from ", name, " worker", sep="", end="")
if success_count > 1:
print(",", success_count, "times.")
else:
print(".")
success_count = 0
else:
print(common_utils.getTimeString(), "No data from", name, "worker.")
if __name__ == "__main__":
if len(sys.argv) == 3:
website_config_filename = sys.argv[1]
config_scheme_filename = sys.argv[2]
elif len(sys.argv) == 2:
website_config_filename = sys.argv[1]
config_scheme_filename = DEFAULT_CONFIG_SCHEME
else:
print("Start this adapter with 'python3", sys.argv[0], "website_config.json (config_scheme.json) command")
website_config_filename = ""
config_scheme_filename = DEFAULT_CONFIG_SCHEME
quit()
# read configuration information from the configuration files
print("Reading configurations")
websites = common_utils.readConfig(website_config_filename)
configurations = common_utils.readConfig(config_scheme_filename)
# start the data queue used to send data to Procem
data_queue = queue.Queue()
threading.Thread(target=common_utils.procemSendWorker, kwargs={"data_queue": data_queue}).start()
for website_id, current_website in websites.items():
try:
website_conf_name = current_website["configuration"]
website_conf = configurations[website_conf_name]
current_website["config"] = website_conf
website_module_name = website_conf["worker"]["module"]
website_module = importlib.import_module(website_module_name)
website_worker_name = website_conf["worker"]["name"]
website_worker = getattr(website_module, website_worker_name, None)
current_website["name"] = website_id
except Exception as error:
print(error)
website_worker = None
if website_worker is not None:
print("Starting thread for REST API: ", website_id, sep="")
time.sleep(1.0)
website_thread = threading.Thread(
target=generic_website_worker,
kwargs={"worker": website_worker, "website": current_website, "website_queue": data_queue},
daemon=True)
website_thread.start()
while True:
txt = input("Press enter key to end:\n\r")
if not txt:
data_queue.put(None)
break
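# Illustrative sketch of the configuration shapes this script expects (inferred from the keys
# read above -- "configuration", "verbose", and the scheme's "worker" module/name -- not an
# official schema; any field names beyond those are assumptions):
#
#     website_config.json:
#         {"my_api": {"configuration": "my_conf", "verbose": 10}}
#     config_scheme.json (rest_api_configuration.json by default):
#         {"my_conf": {"worker": {"module": "my_worker_module", "name": "MyWorkerClass"}}}
#
# The worker class named there must accept (website, website_queue) in its constructor and
# provide getWaitingTime() and getData(), as used by generic_website_worker above.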
|
IntegrationTests.py
|
from __future__ import absolute_import
import multiprocessing
import os
import platform
import threading
import time
import unittest
import percy
import flask
import requests
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
class IntegrationTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(IntegrationTests, cls).setUpClass()
options = Options()
capabilities = DesiredCapabilities.CHROME
capabilities['loggingPrefs'] = {'browser': 'SEVERE'}
if 'DASH_TEST_CHROMEPATH' in os.environ:
options.binary_location = os.environ['DASH_TEST_CHROMEPATH']
cls.driver = webdriver.Chrome(options=options, desired_capabilities=capabilities)
loader = percy.ResourceLoader(
webdriver=cls.driver,
base_url='/assets',
root_dir='test/assets'
)
cls.percy_runner = percy.Runner(loader=loader)
cls.percy_runner.initialize_build()
@classmethod
def tearDownClass(cls):
super(IntegrationTests, cls).tearDownClass()
cls.driver.quit()
cls.percy_runner.finalize_build()
def setUp(self):
pass
def tearDown(self):
if platform.system() == 'Windows':
requests.get('http://localhost:8050/stop')
else:
self.server_process.terminate()
self.clear_log()
time.sleep(1)
def startServer(self, app):
"""
:param app:
:type app: dash.Dash
:return:
"""
if 'DASH_TEST_PROCESSES' in os.environ:
processes = int(os.environ['DASH_TEST_PROCESSES'])
else:
processes = 4
def run():
app.scripts.config.serve_locally = True
app.css.config.serve_locally = True
app.run_server(
port=8050,
debug=False,
processes=processes,
threaded=False,
)
def run_windows():
app.scripts.config.serve_locally = True
app.css.config.serve_locally = True
@app.server.route('/stop')
def _stop_server_windows():
stopper = flask.request.environ['werkzeug.server.shutdown']
stopper()
return 'stop'
app.run_server(
port=8050,
debug=False,
threaded=True
)
# Run on a separate process so that it doesn't block
system = platform.system()
if system == 'Windows':
# multiprocessing can't pickle an inner function on Windows (closures are not serializable by default on Windows)
self.server_thread = threading.Thread(target=run_windows)
self.server_thread.start()
else:
self.server_process = multiprocessing.Process(target=run)
self.server_process.start()
time.sleep(2)
# Visit the dash page
self.driver.get('http://localhost:8050')
def clear_log(self):
entries = self.driver.get_log("browser")
if entries:
self.last_timestamp = entries[-1]["timestamp"]
def get_log(self):
entries = self.driver.get_log("browser")
return [entry for entry in entries if entry["timestamp"] > self.last_timestamp]
last_timestamp = 0
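# Illustrative sketch of how a concrete test case might build on this base class (an
# assumption, not part of the original suite; `dash` and `dash_html_components` are assumed
# to be installed, matching the era of this Selenium/percy setup):
#
#     import dash
#     import dash_html_components as html
#
#     class Tests(IntegrationTests):
#         def test_render(self):
#             app = dash.Dash(__name__)
#             app.layout = html.Div('Hello Dash')
#             self.startServer(app)
#             self.percy_runner.snapshot(name='render')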
|
graph.py
|
#!/usr/bin/env python3.7
import logging
import random
import os
import threading
import uuid
from urllib.parse import quote
import falcon
import hug
import hug.types
import plotly.graph_objs as go
import plotly.plotly as py
import requests
import requests_html
from cachetools import func
IMDB_URL = 'https://www.imdb.com'
py.sign_in(os.environ['PLOTLY_USERNAME'], os.environ['PLOTLY_API_KEY'])
session = requests_html.HTMLSession()
api = hug.API(__name__)
@func.ttl_cache(maxsize=32, ttl=3600)
def create_graph(title):
results = dict()
# find a candidate (with English as accept language to avoid geolocalized title names)
search_res = session.get(IMDB_URL + f'/find?q={title}&s=tt&ttype=tv', headers={'Accept-Language': 'en'})
candidate = search_res.html.find('.findResult .result_text a', first=True)
if not candidate: raise Exception(f'Oh no! No TV series was found with the name: {title}')
tt_id = candidate.search('/title/{}/')[0]
title = candidate.text
# get seasons
seasons_res = session.get(IMDB_URL + f'/title/{tt_id}/episodes/_ajax')
seasons = [s.attrs['value'] for s in seasons_res.html.find('#bySeason option')]
if not seasons: raise Exception(f'Oh no! No seasons were found for: {title}')
for season in seasons:
# get ratings
ratings_res = session.get(IMDB_URL + f'/title/{tt_id}/episodes/_ajax?season={season}')
rows = ratings_res.html.find('.info')
if not rows: raise Exception(f'Oh no! No ratings were found for: {title}')
# parse ratings
for row in rows:
ep_number = int(row.find('[itemprop="episodeNumber"]', first=True).attrs['content'])
if ep_number < 1: continue # episode doesn't belong in a season (eg. special)
if not row.find('.ipl-rating-widget'): continue # episode hasn't aired yet
if row.find('.ipl-rating-star--placeholder'): continue # episode hasn't been rated yet
ep_rating = float(row.find('.ipl-rating-star__rating', first=True).text)
results.setdefault(season, []).append(ep_rating)
# create graph data
data = []
episodes = 0
for season, ratings in results.items():
data.append(go.Scatter(
name='S' + str(season),
x=list(range(episodes + 1, episodes + len(ratings) + 1)),
y=ratings,
mode='lines+markers',
marker=dict(size=5)
))
episodes += len(ratings)
# set up layout
layout = go.Layout(
title=f'<b>IMDb ratings of {title} episodes</b>',
yaxis=dict(title='Rating', range=[0, 10.1], tickmode='linear', tick0=0,
dtick=2.5, tickformat='.1f', tickprefix=' ' * 10),
xaxis=dict(title='Episode', range=[0, episodes + 1], tickmode='array',
tickvals=[1, episodes], showgrid=False),
margin=go.layout.Margin(l=100, pad=10),
showlegend=False,
width=1200,
height=400
)
fig = go.Figure(data=data, layout=layout)
output = py.image.get(fig)
return output
@hug.output_format.on_valid('image/png')
def format_as_png_when_valid(data):
return data
@hug.get(output=format_as_png_when_valid, examples='title=Breaking%20Bad')
def graph(title: hug.types.text):
"""Returns an IMDb ratings graph of the given TV series"""
return create_graph(title)
@hug.get(output_invalid=hug.output_format.text, examples='text=Breaking%20Bad&response_url=callback',
on_invalid=lambda x: 'Have you tried turning it off and on again? :troll:')
def slack(text: hug.types.text, response_url: hug.types.text, request=None):
"""Sends a delayed response to callback url for Slack integration"""
title = text
if text == 'top250':
top250_res = session.get(IMDB_URL + '/chart/toptv', headers={'Accept-Language': 'en'})
candidates = top250_res.html.find('.chart .titleColumn a')
title = random.choice(candidates).text
t = threading.Thread(target=slack_post, args=(response_url, request.prefix, title))
t.start()
return dict(
response_type='in_channel',
text='I will get right on that! :construction_worker:'
)
@hug.not_found(output=hug.output_format.json)
def not_found(documentation: hug.directives.documentation):
return documentation
@hug.exception(Exception)
def handle_exception(exception):
logging.exception('An exception with the following traceback occurred:')
raise falcon.HTTPInternalServerError('error', str(exception))
def slack_post(response_url, prefix, title):
create_graph(title)
requests.post(response_url, json=dict(
response_type='in_channel',
attachments=[
dict(image_url=prefix + f'/graph?title={quote(title)}&uuid={uuid.uuid4()}')
]
))
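# Illustrative way to serve this API locally (an assumption based on hug's standard CLI and
# its default port; PLOTLY_USERNAME and PLOTLY_API_KEY must be set, as required at import
# time above):
#
#     hug -f graph.py
#     curl 'http://localhost:8000/graph?title=Breaking%20Bad' --output ratings.png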
|
lastlayerbayesian.py
|
# wiseodd/last_layer_laplace
import matplotlib
matplotlib.use("Agg")
from torch.distributions.multivariate_normal import MultivariateNormal
import seaborn as sns
sns.set_style('white')
from torch.utils.data import TensorDataset
from main import *
from utils import exact_hessian
plt = matplotlib.pyplot
plt.rcParams["axes.titlesize"] = 8
# DM
import torch.multiprocessing
from torch.multiprocessing import Process, Manager
# DM
# This is required both to get AMD CPUs to work well and to disable the
# aggressive multi-threading of the underlying linear algebra libraries,
# which interferes with our multiprocessing with PyTorch
os.environ['CUDA_VISIBLE_DEVICES'] = '' # disable CUDA
os.environ['MKL_DEBUG_CPU_TYPE'] = '5' # Get MKL to work properly on AMD CPU
os.environ['MKL_SERIAL'] = 'YES' # reduce thread usage in linalg
os.environ['OMP_NUM_THREADS'] = '1' # reduce thread usage in linalg
# dataset
def get_data(args):
train_size = int(args.n)
valid_size = int(args.n * 0.5)
test_size = int(10000)
X_rv = MultivariateNormal(torch.zeros(args.input_dim), torch.eye(args.input_dim))
y_rv = MultivariateNormal(torch.zeros(args.output_dim), torch.eye(args.output_dim))
with torch.no_grad():
X = X_rv.sample(torch.Size([train_size+valid_size]))
X_test = args.X_test_std * X_rv.sample(torch.Size([test_size]))
if args.realizable == 1:
true_model = Model(args.input_dim, args.output_dim, args.ffrelu_layers, args.ffrelu_hidden, args.rr_hidden, args.use_rr_relu)
true_model.eval()
true_mean = true_model(X)
true_mean_test = true_model(X_test)
else:
a = Normal(0.0, 1.0)
a_params = 0.2 * a.sample((args.input_dim, args.rr_hidden))
b = Normal(0.0, 1.0)
b_params = 0.2 * b.sample((args.rr_hidden, args.output_dim))
true_mean = torch.matmul(torch.matmul(X, a_params), b_params)
true_mean_test = torch.matmul(torch.matmul(X_test, a_params), b_params)
y = true_mean + y_rv.sample(torch.Size([train_size+valid_size]))
y_test = true_mean_test + y_rv.sample(torch.Size([test_size]))
dataset_train, dataset_valid = torch.utils.data.random_split(TensorDataset(X, y), [train_size, valid_size])
dataset_test = TensorDataset(X_test, y_test)
oracle_mse = (torch.norm(y_test - true_mean_test, dim=1)**2).mean()
entropy = -torch.log((2 * np.pi) ** (-args.output_dim / 2) * torch.exp(-(1 / 2) * torch.norm(y_test - true_mean_test, dim=1) ** 2)).mean()
train_loader = torch.utils.data.DataLoader(dataset_train, batch_size=int(args.batchsize), shuffle=True)
valid_loader = torch.utils.data.DataLoader(dataset_valid, batch_size=int(args.batchsize), shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset_test, batch_size=int(args.batchsize), shuffle=True)
return train_loader, valid_loader, test_loader, oracle_mse, entropy
# model: small feedforward relu block, followed by reduced rank regression in last layers
class Model(nn.Module):
def __init__(self, input_dim, output_dim, ffrelu_layers, ffrelu_hidden, rr_hidden, use_rr_relu):
super(Model, self).__init__()
self.use_rr_relu = use_rr_relu
# feedforward relu block
self.enc_sizes = np.concatenate(
([input_dim], np.repeat(ffrelu_hidden, ffrelu_layers + 1), [input_dim])).tolist()
blocks = [[nn.Linear(in_f, out_f), nn.ReLU()]
for in_f, out_f in zip(self.enc_sizes, self.enc_sizes[1:])]
blocks = list(itertools.chain(*blocks))
del blocks[-1] # remove the last ReLu, don't need it in output layer
self.feature_map = nn.Sequential(*blocks)
# reduced rank regression block
self.rr = nn.Sequential(
nn.Linear(input_dim, rr_hidden, bias=False), # A
nn.Linear(rr_hidden, output_dim, bias=False) # B
)
# reduced rank regression block with relu activation
self.rr_relu = nn.Sequential(
nn.Linear(input_dim, rr_hidden, bias=False), # A
nn.ReLU(),
nn.Linear(rr_hidden, output_dim, bias=False) # B
)
def forward(self, x):
x = self.feature_map(x)
if self.use_rr_relu == 1:
return self.rr_relu(x)
else:
return self.rr(x)
def map_train(args, train_loader, valid_loader, test_loader, oracle_mse):
model = Model(args.input_dim, args.output_dim, args.ffrelu_layers, args.ffrelu_hidden, args.rr_hidden, args.use_rr_relu)
opt = optim.SGD(model.parameters(), lr=1e-3, momentum=0.9, weight_decay=args.weight_decay)
early_stopping = EarlyStopping(patience=10, verbose=False, taskid=args.taskid)
for it in range(args.train_epochs):
model.train()
running_loss = 0
for batch_idx, (data, target) in enumerate(train_loader):
y_pred = model(data)
l = (torch.norm(y_pred - target, dim=1)**2).mean()
l.backward()
opt.step()
opt.zero_grad()
running_loss += (torch.norm(y_pred - target, dim=1)**2).sum().detach().numpy()
# between epochs
model.eval()
with torch.no_grad():
valid_loss = 0
for batch_idx, (data, target) in enumerate(valid_loader):
valid_loss += (torch.norm(model(data) - target, dim=1)**2).sum()
if args.use_early_stopping == 1:
early_stopping(valid_loss, model)
if early_stopping.early_stop:
print("Early stopping")
break
# print test loss every now and then
if it % args.log_interval == 0:
model.eval()
with torch.no_grad():
test_loss = 0
for batch_idx, (data, target) in enumerate(test_loader):
ytest_pred = model(data)
test_loss += (torch.norm(ytest_pred - target, dim=1)**2).sum()
print('MSE: train {:.3f}, validation {:.3f}, test {:.3f}, oracle on test set {:.3f}'.format(running_loss/len(train_loader.dataset), valid_loss/len(valid_loader.dataset), test_loss/len(test_loader.dataset), oracle_mse))
return model
def laplace_last(model, args, X_train, Y_train, X_test, Y_test, lastlayeronly=False):
A = list(model.parameters())[-2]
A_map = A.view(-1).data.numpy()
B = list(model.parameters())[-1]
B_map = B.view(-1).data.numpy()
if lastlayeronly:
W_map = B_map
else:
W_map = np.concatenate((A_map, B_map))
# get negative log posterior = negative log likelihood + negative log prior
y_pred = model(X_train)
nll = (torch.norm(y_pred - Y_train, dim=1)**2).mean()
# Negative-log-prior
nlp = 1 / 2 * A.flatten() @ (args.weight_decay * torch.eye(A.numel())) @ A.flatten() + 1 / 2 * B.flatten() @ (args.weight_decay * torch.eye(B.numel())) @ B.flatten()
loss = nll + nlp
if lastlayeronly:
Lambda = exact_hessian(loss, [B]) # The Hessian of the negative log-posterior
else:
Lambda = exact_hessian(loss, [A, B]) # The Hessian of the negative log-posterior
Sigma = torch.inverse(Lambda).detach().numpy()
# posterior over w approximated as N(w_map, Sigma)
sampled_weights = np.random.multivariate_normal(mean=W_map, cov=Sigma, size=args.R)
with torch.no_grad():
transformed_X_test = model.feature_map(X_test)
if lastlayeronly:
transformed_X_test = np.matmul(transformed_X_test, A.detach().numpy())
pred_prob = 0
for r in range(0, args.R):
if lastlayeronly:
sampled_b = sampled_weights[r,:].reshape(args.rr_hidden,args.output_dim)
mean = np.matmul(transformed_X_test, sampled_b)
else:
sampled_a = sampled_weights[r,0:(args.rr_hidden*args.input_dim)].reshape(args.input_dim,args.rr_hidden)
sampled_b = sampled_weights[r,-(args.rr_hidden*args.output_dim):].reshape(args.rr_hidden,args.output_dim)
mean = np.matmul(np.matmul(transformed_X_test, sampled_a), sampled_b)
pred_prob += (2 * np.pi) ** (-args.output_dim / 2) * torch.exp(-(1 / 2) * torch.norm(Y_test - mean, dim=1) ** 2)
return -torch.log(pred_prob / args.R).mean()
def mcmc_last(model, args, X_train, Y_train, X_test, Y_test, lastlayeronly=False):
B = list(model.parameters())[-1]
A = list(model.parameters())[-2]
transformed_X_train = model.feature_map(X_train)
transformed_X_test = model.feature_map(X_test)
if lastlayeronly:
transformed_X_train = torch.matmul(transformed_X_train, A)
transformed_X_test = torch.matmul(transformed_X_test, A)
if args.use_rr_relu:
transformed_X_train = torch.relu(transformed_X_train)
transformed_X_test = torch.relu(transformed_X_test)
kernel = NUTS(conditioned_pyro_rr, adapt_step_size=True)
mcmc = MCMC(kernel, num_samples=args.R, warmup_steps=args.num_warmup, disable_progbar=True)
if args.mcmc_prior_map == 1:
mcmc.run(pyro_rr, transformed_X_train, Y_train, args.rr_hidden, beta=1.0, Bmap=B, Amap=A, relu=args.use_rr_relu, lastlayeronly=lastlayeronly)
else:
mcmc.run(pyro_rr, transformed_X_train, Y_train, args.rr_hidden, beta=1.0, Bmap=None, Amap=None, relu=args.use_rr_relu, lastlayeronly=lastlayeronly)
sampled_weights = mcmc.get_samples()
pred_prob = 0
output_dim = Y_train.shape[1]
for r in range(0, args.R):
if lastlayeronly:
mean = torch.matmul(transformed_X_test, sampled_weights['B'][r,:,:])
else:
if args.use_rr_relu:
z = torch.relu(torch.matmul(transformed_X_test, sampled_weights['A'][r, :, :]))
else:
z = torch.matmul(transformed_X_test, sampled_weights['A'][r,:,:])
mean = torch.matmul(z, sampled_weights['B'][r,:,:])
pred_prob += (2 * np.pi) ** (-output_dim / 2) * torch.exp(-(1 / 2) * torch.norm(Y_test - mean, dim=1) ** 2)
return -torch.log(pred_prob / args.R).mean()
def run_worker(i, n, G_mcmc_rrs, G_mcmc_lasts, G_maps, G_laplace_rrs, G_laplace_lasts, entropys, args):
G_map = np.empty(args.MCs)
G_mcmc_rr = np.empty(args.MCs)
G_mcmc_last = np.empty(args.MCs)
G_laplace_rr = np.empty(args.MCs)
G_laplace_last = np.empty(args.MCs)
entropy_array = np.empty(args.MCs)
args.n = n
if args.use_minibatch == 0:
args.batchsize = n
else:
args.batchsize = 32
start = time.time()
for mc in range(0, args.MCs):
train_loader, valid_loader, test_loader, oracle_mse, entropy = get_data(args)
entropy_array[mc] = entropy
X_train = train_loader.dataset[:][0]
Y_train = train_loader.dataset[:][1]
X_test = test_loader.dataset[:][0]
Y_test = test_loader.dataset[:][1]
model = map_train(args, train_loader, valid_loader, test_loader, oracle_mse)
model.eval()
G_map[mc] = -torch.log((2*np.pi)**(-args.output_dim /2) * torch.exp(-(1/2) * torch.norm(Y_test-model(X_test), dim=1)**2)).mean() - entropy
G_laplace_rr[mc] = laplace_last(model, args, X_train, Y_train, X_test, Y_test, lastlayeronly=False) - entropy
G_laplace_last[mc] = laplace_last(model, args, X_train, Y_train, X_test, Y_test, lastlayeronly=True) - entropy
G_mcmc_rr[mc] = mcmc_last(model, args, X_train, Y_train, X_test, Y_test, lastlayeronly=False) - entropy
G_mcmc_last[mc] = mcmc_last(model, args, X_train, Y_train, X_test, Y_test, lastlayeronly=True) - entropy
print('[n = {}, mc {}] gen error: map {:.4f}, mcmc rr {:.4f}, laplace rr {:.4f}, mcmc last {:.4f}, laplace last {:.4f}'
.format(n, mc, G_map[mc], G_mcmc_rr[mc], G_laplace_rr[mc], G_mcmc_last[mc], G_laplace_last[mc]))
print('[n = {}] average gen error: MAP {}, mcmc rr {}, laplace rr {}, mcmc last {}, laplace last {}'
.format(n, G_map.mean(), G_mcmc_rr.mean(), G_laplace_rr.mean(), G_mcmc_last.mean(), G_laplace_last.mean()))
print('[n = {}] time taken(s): {}'.format(n, time.time() - start))
G_mcmc_rrs[i] = G_mcmc_rr
G_mcmc_lasts[i] = G_mcmc_last
G_laplace_rrs[i] = G_laplace_rr
G_laplace_lasts[i] = G_laplace_last
G_maps[i] = G_map
entropys[i] = entropy_array
return
def main():
parser = argparse.ArgumentParser(description='last layer Bayesian')
parser.add_argument('--experiment-name', type=str, default='')
parser.add_argument('--taskid', type=int, default=1)
# Data
parser.add_argument('--input-dim', type=int, default=3)
parser.add_argument('--output-dim', type=int, default=3)
parser.add_argument('--X-test-std', type=float, default=1.0)
parser.add_argument('--realizable', type=int, default=0, help='1 if true distribution is realizable by model')
# Model
parser.add_argument('--ffrelu-layers',type=int, default=1, help='number of layers in feedforward relu block')
parser.add_argument('--ffrelu-hidden',type=int, default=5, help='number of hidden units in feedforward relu block')
parser.add_argument('--rr-hidden', type=int, default=3, help='number of hidden units in final reduced regression layers')
parser.add_argument('--use-rr-relu', type=int, default=0, help='1 if true, 0 else')
parser.add_argument('--use-minibatch', type=int, default=0, help='1 if use minbatch sgd for map training')
parser.add_argument('--train-epochs', type=int, default=5000, help='number of epochs to find MAP')
parser.add_argument('--use-early-stopping', type=int, default=0, help='1 to employ early stopping in map training based on validation loss')
parser.add_argument('--log-interval', type=int, default=500, metavar='N', help='how many batches to wait before logging training status')
parser.add_argument('--weight-decay', type=float, default=5e-4)
# MCMC
parser.add_argument('--mcmc-prior-map', type=int, default=0, help='1 if mcmc prior should be centered at map')
parser.add_argument('--num-warmup', type=int, default=1000, help='burn in')
parser.add_argument('--R', type=int, default=1000, help='number of MC draws from approximate posterior')
parser.add_argument('--MCs', type=int, default=20, help='number of times to split into train-test')
parser.add_argument('--num-n', type=int, default=10, help='number of sample sizes for learning curve')
parser.add_argument('--seed', type=int, default=43)
parser.add_argument('--cuda', action='store_true',default=False, help='flag for CUDA training')
args = parser.parse_args()
torch.manual_seed(args.seed)
args.cuda = args.cuda and torch.cuda.is_available()
init_model = Model(args.input_dim, args.output_dim, args.ffrelu_layers, args.ffrelu_hidden, args.rr_hidden, args.use_rr_relu)
args.total_param_count = sum(p.numel() for p in init_model.parameters() if p.requires_grad)
args.w_dim = args.rr_hidden*(args.input_dim + args.output_dim) # number of parameters in reduced rank regression layers
H0 = min(args.input_dim, args.output_dim, args.rr_hidden)
args.trueRLCT = theoretical_RLCT('rr', (args.input_dim, args.output_dim, H0, args.rr_hidden))
n_range = np.rint(np.logspace(2.3, 3.0, 10)).astype(int)
args.n_range = n_range
print(args)
torch.save(args,'{}_taskid{}_args.pt'.format(args.experiment_name, args.taskid))
# We do each n in parallel
manager = Manager()
m_G_maps = manager.list(n_range)
m_G_mcmc_rrs = manager.list(n_range)
m_G_mcmc_lasts = manager.list(n_range)
m_G_laplace_rrs = manager.list(n_range)
m_G_laplace_lasts = manager.list(n_range)
m_entropys = manager.list(n_range)
jobs = []
for i in range(len(n_range)):
n = n_range[i]
print("Starting job [n = {0}]".format(n))
p = Process(target=run_worker, args=(i, n, m_G_mcmc_rrs, m_G_mcmc_lasts, m_G_maps, m_G_laplace_rrs, m_G_laplace_lasts, m_entropys, args))
jobs.append(p)
p.start()
# block on all jobs completing
for p in jobs:
p.join()
# variables to save for producing graphics/table later
G_mcmc_rrs = list(m_G_mcmc_rrs)
G_mcmc_lasts = list(m_G_mcmc_lasts)
G_maps = list(m_G_maps)
G_laplace_rrs = list(m_G_laplace_rrs)
G_laplace_lasts = list(m_G_laplace_lasts)
entropys = list(m_entropys)
results = dict()
results['mcmc_rr'] = G_mcmc_rrs
results['mcmc_last'] = G_mcmc_lasts
results['map'] = G_maps
results['laplace_rr'] = G_laplace_rrs
results['laplace_last'] = G_laplace_lasts
results['entropy'] = entropys
torch.save(results,'lastlayersims/{}_taskid{}_results.pt'.format(args.experiment_name, args.taskid))
if __name__ == "__main__":
main()
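# Illustrative invocation (an assumption; the flag names are taken from the argparse setup above):
#
#     python lastlayerbayesian.py --experiment-name demo --taskid 1 --MCs 2 --R 200 --num-warmup 200
#
# Results are written to lastlayersims/<experiment-name>_taskid<taskid>_results.pt, so the
# lastlayersims/ directory is assumed to exist beforehand.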
|
multivariate_images_tools.py
|
##############################################################################
# Some functions useful for treatments on multivariate images
# Authored by Ammar Mian, 09/11/2018
# e-mail: ammar.mian@centralesupelec.fr
##############################################################################
# Copyright 2018 @CentraleSupelec
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from multiprocessing import Process, Queue
import numpy as np
from generic_functions import *
import time
from tqdm import tqdm
def sliding_windows_treatment_image_time_series(image, windows_mask, function_to_compute, function_args, multi=False, queue=0):
""" A function that allowing to compute a sliding windows treatment over a multivariate
image time series.
Inputs:
* image = a numpy array of shape (n_r,n_c,p,T) where n_r is the number of rows,
n_c is the number of columns, p is the number of canals and T is the length
of the time series.
* windows_mask = a local mask used to select data; a numpy boolean array.
* function_to_compute = a function to compute the desired quantity. Must output a list.
* function_args = arguments to pass to function_to_compute
* multi = True if parallel computing (use the parallel function, not this one), False if not
* queue = queue used to return the result when run as a parallel worker
Outputs:
* a 3-d array corresponding to the results. The first two dimensions are spatial while the third
corresponds to the output of function_to_compute."""
n_r, n_c, p, T = image.shape
m_r, m_c = windows_mask.shape
N = m_r*m_c
result = []
if multi:
for i_r in range(int(m_r/2),n_r-int(m_r/2)): # Iterate on rows
result_line = []
for i_c in range(int(m_c/2),n_c-int(m_c/2)): # Iterate on columns
# Obtaining data corresponding to the neighborhood defined by the mask
local_data = image[i_r-int(m_r/2):i_r+int(m_r/2)+1, i_c-int(m_c/2):i_c+int(m_c/2)+1, :, 0].T.reshape((p,N))
for t in range(1,T):
local_data = np.dstack((local_data, image[i_r-int(m_r/2):i_r+int(m_r/2)+1, i_c-int(m_c/2):i_c+int(m_c/2)+1, :, t].T.reshape((p,N))))
# Applying mask
local_data = local_data.reshape((p,N,T))
local_data = local_data[:,windows_mask.reshape(m_r*m_c).astype(bool),:]
# Computing the function over the local data
result_line.append(function_to_compute(local_data, function_args))
result.append(result_line)
queue.put(result)
else:
for i_r in tqdm(range(int(m_r/2),n_r-int(m_r/2))): # Iterate on rows
result_line = []
for i_c in range(int(m_c/2),n_c-int(m_c/2)): # Iterate on columns
# Obtaining data corresponding to the neighborhood defined by the mask
local_data = image[i_r-int(m_r/2):i_r+int(m_r/2)+1, i_c-int(m_c/2):i_c+int(m_c/2)+1, :, 0].T.reshape((p,N))
for t in range(1,T):
local_data = np.dstack((local_data, image[i_r-int(m_r/2):i_r+int(m_r/2)+1, i_c-int(m_c/2):i_c+int(m_c/2)+1, :, t].T.reshape((p,N))))
# Applying mask
local_data = local_data.reshape((p,N,T))
local_data = local_data[:,windows_mask.reshape(m_r*m_c).astype(bool),:]
# Computing the function over the local data
result_line.append(function_to_compute(local_data, function_args))
result.append(result_line)
return np.array(result)
def sliding_windows_treatment_image_time_series_parallel(image, windows_mask, function_to_compute, function_args,
multi=False, number_of_threads_rows=3, number_of_threads_columns=3):
""" A function that is a prallelisation of sliding_windows_treatment_image_time_series
Inputs:
* image = a numpy array of shape (n_r,n_c,p,T) where n_r is the number of rows,
n_c is the number of columns, p is the number of canals and T is the length
of the time series.
* windows_mask = a local mask used to select data; a numpy boolean array.
* function_to_compute = a function to compute the desired quantity. Must output a list.
* function_args = arguments to pass to function_to_compute
* multi = True if parallel computing, False if not
* number_of_threads_rows = number of worker processes to split the rows across
* number_of_threads_columns = number of worker processes to split the columns across
(total processes = number_of_threads_rows * number_of_threads_columns,
typically the number of cores of the machine)
Outputs:
* a 3-d array corresponding to the results, assembled from the slices computed by each process."""
if multi:
# Slicing the original image while taking border effects into account
n_r, n_c, p, T = image.shape
m_r, m_c = windows_mask.shape
image_slices_list = [] # Will contain each slice
for i_row in range(0,number_of_threads_rows):
# Indexes for the sub_image for rows
if i_row == 0:
index_row_start = 0
else:
index_row_start = int(n_r/number_of_threads_rows)*i_row - int(m_r/2)
if i_row == number_of_threads_rows-1:
index_row_end = n_r
else:
index_row_end = int(n_r/number_of_threads_rows)*(i_row+1) + int(m_r/2)
# Slices for each row
image_slices_list_row = []
for i_column in range(0, number_of_threads_columns):
# Indexes for the sub_image for columns
if i_column == 0:
index_column_start = 0
else:
index_column_start = int(n_c/number_of_threads_columns)*i_column - int(m_c/2)
if i_column == number_of_threads_columns-1:
index_column_end = n_c
else:
index_column_end = int(n_c/number_of_threads_columns)*(i_column+1) + int(m_c/2)
# Obtaining each slice and putting it in the list
image_slice = image[index_row_start:index_row_end, index_column_start:index_column_end, :, :]
image_slices_list_row.append(image_slice)
# 2d list of slices
image_slices_list.append(image_slices_list_row)
# Freeing space
image_slice = None
image_slices_list_row = None
# Serves to obtain result for each thread
queues = [[Queue() for i_c in range(number_of_threads_columns)] for i_r in range(number_of_threads_rows)]
# Arguments to pass to each thread
args = [(image_slices_list[i_r][i_c], windows_mask, function_to_compute, function_args,
True, queues[i_r][i_c]) for i_r in range(number_of_threads_rows) for i_c in range(number_of_threads_columns)]
# Initialising the threads
jobs = [Process(target=sliding_windows_treatment_image_time_series, args=a) for a in args]
# Starting parallel computation
for j in jobs: j.start()
# Obtaining result for each thread
results_list = [] # Results container
for i_r in range(0,number_of_threads_rows):
results_row_list = []
for i_c in range(0,number_of_threads_columns):
results_row_list.append( queues[i_r][i_c].get() )
results_list.append(results_row_list)
results_row_list = None
# Waiting for each thread to terminate
for j in tqdm(jobs): j.join()
# Now we reform the resulting image from the slices of results
results = []
for i_r in range(0,number_of_threads_rows):
final_array_row = []
for i_c in range(0,number_of_threads_columns):
final_array_row.append(results_list[i_r][i_c])
results.append(np.hstack(final_array_row))
results = np.vstack(results)
final_array_row = None
else:
results = sliding_windows_treatment_image_time_series(image, windows_mask,
function_to_compute, function_args)
return results
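# Illustrative usage sketch (not part of the original module): a 5x5 boolean window slid over
# a random (rows, cols, canals, time) image, computing one scalar per pixel. The helper name
# `mean_modulus` is ours, purely for illustration. On platforms where multiprocessing uses
# spawn (e.g. Windows), wrap this in an `if __name__ == '__main__':` guard:
#
#     def mean_modulus(local_data, args):
#         return [np.abs(local_data).mean()]
#
#     image = np.random.randn(64, 64, 3, 4)
#     mask = np.ones((5, 5))
#     result = sliding_windows_treatment_image_time_series_parallel(
#         image, mask, mean_modulus, None,
#         multi=True, number_of_threads_rows=2, number_of_threads_columns=2)
#     print(result.shape)  # borders are lost: roughly (64-4, 64-4, 1)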
|
engine.py
|
"""
Event-driven framework of vn.py framework.
"""
##############################################
# 1. Event registration and removal: users decide, according to their own needs, which events the engine should care about.
# 2. Hooking handler methods to events: one event can clearly be handled by several methods, and one method can handle several events.
# 3. Continuously listening for events and, whenever one occurs, handling it accordingly by calling the registered functions.
from collections import defaultdict
from queue import Empty, Queue
from threading import Thread
from time import sleep
from typing import Any, Callable
EVENT_TIMER = "eTimer"
class Event:
"""
An event object consists of a type string, which is used
by the event engine for distributing events, and a data
object which contains the real data.
"""
def __init__(self, type: str, data: Any = None):
""""""
self.type = type
self.data = data
# Defines the handler function type to be used in the event engine.
HandlerType = Callable[[Event], None]
class EventEngine:
"""
Event-driven engine.

All variables in the event-driven engine are private. This is to prevent
their values or state from being accidentally modified from outside,
which could cause bugs.

Variables
_queue: private, the event queue
_active: private, the on/off switch of the event engine
_thread: private, the event-processing thread
_timer: private, the timer thread
_handlers: private, the dict of event handler functions

Methods
_run: private, the loop run continuously by the event-processing thread
_process: private, processes an event by calling the listener functions registered in the engine
_run_timer: private, puts a timer event into the event queue each time the fixed timer interval elapses
start: public, starts the engine
stop: public, stops the engine
register: public, registers a listener function with the engine
unregister: public, unregisters a listener function from the engine
put: public, puts a new event into the event queue

An event listener function must be defined to take a single event object
as its only input parameter, i.e.:

Function
def func(event)
...

Object method
def method(self, event)
...
"""
def __init__(self, interval: int = 1):
"""
Timer event is generated every 1 second by default, if
the interval is not specified.
"""
### Purpose: initialise the event manager
self._interval = interval # 事件周期
self._queue = Queue() # 事件队列
self._active = False # 事件引擎开关
self._thread = Thread(target=self._run) # 事件处理线程
self._timer = Thread(target=self._run_timer) # 计时器用于触发计时器事件
# _handlers is a dict that stores the event-to-handler relationships:
# each key maps to a list of the handler functions listening to that event type
self._handlers = defaultdict(list) # 一个字典
# _general_handlers is a list of general callbacks that are invoked for every event
self._general_handlers = []
def _run(self):
"""
Get an event from the queue and then process it.
"""
while self._active:
try:
# wait up to 1 second to get an event from the queue
event = self._queue.get(block=True, timeout=1)
self._process(event) # 生成一个变量
except Empty:
pass
def _process(self, event: Event):
"""
First distribute the event to those handlers registered as listening
to this type.
Then distribute the event to those general handlers which listen
to all types.
"""
# check whether any handlers are listening for this event type
if event.type in self._handlers:
# if so, pass the event to each of those handlers in order
[handler(event) for handler in self._handlers[event.type]]
# The statement above uses a Python list comprehension; the equivalent plain loop would be:
# for handler in self._handlers[event.type]:
# handler(event)
# check whether any general handlers are registered
if self._general_handlers:
# same as above
[handler(event) for handler in self._general_handlers]
def _run_timer(self):
"""
Sleep for the configured interval (in seconds) and then generate a timer event.
"""
while self._active:
sleep(self._interval) # 阻塞时间,默认设置1秒,可以在初始化的时候更改
event = Event(EVENT_TIMER) # 创建计时器事件
self.put(event) # 向队列中存入计时器事件
def start(self):
"""
Start the event engine to process events and generate timer events.
"""
self._active = True # 将事件引擎设置为启动
self._thread.start() # 启动事件引擎
self._timer.start() # 启动计时器,默认为1秒
def stop(self):
"""
Stop the event engine.
"""
self._active = False # 将引擎设置为停止
self._timer.join() # 停止计时器
self._thread.join() # 停止事件引擎线程
def put(self, event: Event):
"""
Put an event object into the event queue.
"""
self._queue.put(event)
def register(self, type: str, handler: HandlerType):
"""
Register a new handler function for a specific event type. Every
function can only be registered once for each event type.
"""
# get the list of handlers registered for this event type
handler_list = self._handlers[type]
# add the handler if it is not already in the list
if handler not in handler_list:
handler_list.append(handler)
def unregister(self, type: str, handler: HandlerType):
"""
Unregister an existing handler function from the event engine.
"""
handler_list = self._handlers[type]
# remove the handler if it is registered for this event type
if handler in handler_list:
handler_list.remove(handler)
# if no handlers remain for this event type, remove the entry entirely
if not handler_list:
self._handlers.pop(type)
def register_general(self, handler: HandlerType):
"""
Register a new handler function for all event types. Every
function can only be registered once for each event type.
"""
if handler not in self._general_handlers:
self._general_handlers.append(handler)
def unregister_general(self, handler: HandlerType):
"""
Unregister an existing general handler function.
"""
if handler in self._general_handlers:
self._general_handlers.remove(handler)
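# Illustrative sketch (not part of the original module): a general handler receives every
# event, including the periodic EVENT_TIMER events generated by the timer thread:
#
#     def log_all(event):
#         print("event received:", event.type)
#
#     engine = EventEngine(interval=2)
#     engine.register_general(log_all)
#     engine.start()
#     ...
#     engine.stop()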
if __name__ == "__main__":
from vnpy.event import EventEngine
EVENT_TIMER = "eTimer"
def func(event):
print("hello timer!")
ee = EventEngine()
ee.register(EVENT_TIMER, func)
ee.start()
|
client_handler.py
|
########################################################################################################################
# Class: Computer Networks
# Date: 02/03/2020
# Lab3: Server support for multiple clients
# Goal: Learning Networking in Python with TCP sockets
# Student Name: Rohan Rawat
# Student ID: 917018484
# Student Github Username: rawatrohan123
# Lab Instructions: No partial credit will be given. Labs must be completed in class, and must be committed to your
# personal repository by 9:45 pm.
# Running instructions: This program needs the server to run. The server creates an object of this class.
#
########################################################################################################################
from threading import Thread
import threading
import pickle
import random
from bot import Bot
from menu import Menu
from message import Message
from datetime import datetime
from cdma import CDMA
from network_map import NetworkMap
from chat_room import ChatRoom
import sys
from distance_protocols import DistanceProtocols
class ClientHandler:
"""
The client handler class receives and process client requests
and sends responses back to the client linked to this handler.
"""
MAX_ALLOC_MEM = 4096
chat_rooms = []
def __init__(self, server_instance, clienthandler, addr):
"""
Class constructor already implemented for you.
:param server_instance: passed as 'self' when the object of this class is created in the server object
:param clientsocket: the accepted client on server side. this handler, by itself, can send and receive data
from/to the client that is linked to.
:param addr: addr[0] = server ip address, addr[1] = client id assigned buy the server
"""
self.server_ip = addr[0]
self.client_id = addr[1]
self.server = server_instance
self.handler = clienthandler
self.print_lock = threading.Lock() # creates the print lock
self.username = self.receive()
self.messages = Message
self.send_client_info()
self.cdma = CDMA()
self.bots = []
def process_requests(self):
"""
TODO: Create a loop that keeps waiting for client requests.
Note that the process_request(...) method is executed inside the loop
Recall that you must break the loop when the request received is empty.
:return: VOID
"""
data = self.handler.recv(self.MAX_ALLOC_MEM)
deserialized = pickle.loads(data)
self.process_request(deserialized)
def process_request(self, request):
"""
TODO: This implementation is similar to the one you did in the method process_request(...)
that was implemented in the server of lab 3.
Note that in this case, the clienthandler is not passed as a parameter in the function
because you have a private instance of it in the constructor that can be invoked from this method.
:request: the request received from the client. Note that this must be already deserialized
:return: VOID
"""
self.username = request
request = request + " has connected\nClient ID: " + str(self.client_id)
self.log(request)
def send(self, data):
"""
Serializes data with pickle, and then sends the serialized data.
"""
serialized = pickle.dumps(data, protocol=pickle.HIGHEST_PROTOCOL)
self.handler.send(serialized)
def receive(self, max_mem_alloc=4096):
"""
TODO: receive the data, deserializes the data received
:max_mem_alloc: an integer representing the maximum allocation (in bytes) in memory allowed
for the data that is about to be received. By default is set to 4096 bytes
:return: the deserialized data
"""
data = self.handler.recv(max_mem_alloc)
deserialized_data = pickle.loads(data)
return deserialized_data
def send_client_info(self):
"""
Sends the client info to the client.
"""
message = "Your client info is:\nClient Name: " + self.username + "\nClient ID: " + str(self.client_id)
data = {"input": 0, "cache": 0, "message": message}
serialized_data = pickle.dumps(data)
self.handler.send(serialized_data)
def log(self, message):
"""
TODO: log a message on the server windows.
note that before calling the print statement you must acquire a print lock
the print lock must be released after the print statement.
"""
self.print_lock.acquire()
print(message)
self.print_lock.release()
def get_num_users(self):
"""
Returns the number of users currently in the server.
"""
usercount = 0
for entries in self.server.handlers:
usercount = usercount + 1
return usercount
def send_user_list(self):
"""
Sends the list of users that are currently connected to the server
to the client.
"""
        usercount = len(self.server.handlers)
message = "Users Connected: " + str(usercount) + "\n"
for key, value in self.server.handlers.items():
usercount = usercount - 1
message += str(value.username) + ":" + str(key)
if usercount > 0:
message += ", "
data = {"input": 0, "cache": 0, "message": message}
self.send(data)
self.log("Sent user list to client " + self.username + "/" + str(self.client_id))
def send_map(self):
self.log(self.username + ": Mapping the network and sending")
users = []
for key, value in self.server.handlers.items():
users.append(value.username)
user_names = ["Rohan", "Jose", "John", "Amelia"]
distance = DistanceProtocols.map_network([])
message = "Routing table requested! Waiting for response...\n\n\n"
num_index = 0
for user in user_names:
message += "\t\t\t\t\t" + user
message += "\n"
for i in range(0, len(distance)):
for j in range(0, len(distance)):
if j == 0:
message += user_names[num_index]
num_index = num_index + 1
message += (str("\t\t\t\t\t" + str(distance[i][j])) + "\t")
message += "\n"
data = {"input": 0, "cache": 0, "message": message}
self.send(data)
def send_link_state(self):
self.log("Sending link state routing table")
user_names = ["Rohan", "Jose", "John", "Amelia"]
distances = DistanceProtocols.map_network(user_names)
message = "\nRouting table for Rohan (id: 50851) computed with Link State Protocol:\n\n"
message += self.link_state()
data = {"input": 0, "cache": 0, "message": message}
self.send(data)
self.send("")
def send_distance_vector(self):
self.log(self.username + ": Sending routing table with distance vector")
users = []
for key, value in self.server.handlers.items():
users.append(value.username)
user_names = ["Rohan", "Jose", "John", "Amelia"]
distances = DistanceProtocols.map_network(user_names)
distance = DistanceProtocols.distance_vector(distances)
message = "Routing table computed with Distance Vector Protocol: \n\n\n"
num_index = 0
for user in user_names:
message += "\t\t\t\t\t" + user
message += "\n"
for i in range(0, len(distance)):
for j in range(0, len(distance)):
if j == 0:
message += user_names[num_index]
num_index = num_index + 1
message += (str("\t\t\t\t\t" + str(distance[i][j])) + "\t")
message += "\n"
data = {"input": 0, "cache": 0, "message": message}
self.send(data)
def get_proxy(self):
message = "\nComing Soon!\n"
data = {"input": 0, "cache": 0, "message": message}
self.send(data)
def disconnect_client(self):
self.log("Disconnecting client " + str(self.client_id) + " from server!")
message = "Disconnecting from server"
data = {"disconnect": 1, "input": 0, "cache": 0, "message": message}
self.send(data)
def send_message(self):
"""
        Stores a message that the client wants to send to a certain recipient id.
"""
active_id = False
message = "Enter your message: "
data = {"input": 1, "cache": 0, "message": message}
self.send(data)
user_message = self.receive()
message = "Enter recipient id: "
data = {"input": 1, "cache": 0, "message": message}
self.send(data)
try:
recipient_id = int(self.receive())
except Exception as e:
self.send({"input": 0, "cache": 0,
"message": "Error: Please enter an integer representing recipient id. Please try again"})
return
message = datetime.now().strftime('%Y-%m-%d %H:%M') + ": " + user_message + " (private message from " \
+ self.username + ")"
for key, value in self.server.handlers.items():
if key == recipient_id: # checks to see if recipient id matches one currently connected to server
active_id = True
if active_id:
Message.add_message(message, recipient_id)
message = "Message sent!"
data = {"input": 0, "cache": 0, "message": message}
self.send(data)
self.log(self.username + "/" + str(self.client_id) + " sent a message!")
else:
message = "Invalid recipient id. Please try again"
data = {"input": 0, "cache": 0, "message": message}
self.log(self.username + "/" + str(self.client_id) + " failed to send a message!")
self.send(data)
def link_state(self):
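        # Returns a hard-coded demo routing table (from the point of view of the
        # node "Rohan") rather than one computed from the connected users;
        # user_names below is currently unused.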
user_names = ["Rohan", "Jose", "John", "Amelia"]
message = "Destination\t\t\t\tPath\t\t\t\tCost\t\t\t\t\n" +\
"Jose\t\t\t\t{Rohan, Jose}\t\t\t\t15\t\t\t\t\n"+\
"John\t\t\t\t{Rohan, John}\t\t\t\t10\t\t\t\t\n" +\
"Amelia\t\t\t\t{Rohan, John, Amelia}\t\t\t\t35\t\t\t\t\n"
return message
def get_messages(self):
"""
        Retrieves the messages stored for this client and sends them to the
        user as an array containing the combined frequency signal and the codes (implementing CDMA).
"""
max_data_length = 0
user_bit = []
encoded_data = []
message = "\nNumber of unread messages: " + str(Message.num_of_messages(self.client_id)) + "\n" \
+ "Retrieving messages...Please wait this may take a while..."
if Message.num_of_messages(self.client_id) == 0:
message = "\nNumber of unread messages: " + str(Message.num_of_messages(self.client_id)) + "\n"
data = {"input": 0, "cache": 0, "message": message}
self.send(data)
return
data = {"input": 0, "cache": 0, "cdma": 1, "message": message}
self.send(data)
user_messages = Message.get_messages(self.client_id)
for user_message in user_messages:
user_bit.append(self.cdma.text_to_bits(user_message)) # converting all messages into bits
for user in user_bit: # calc max data length based on bits
if max_data_length < len(user):
max_data_length = len(user)
for user in user_bit:
            while len(user) != max_data_length:  # pad shorter messages so every bit sequence has equal length
                user.extend((0, 0, 1, 0, 0, 0, 0, 0))  # 0b00100000 = 32, the bits of an ASCII space
code = self.cdma.codes(len(user_bit), 2 * max_data_length)
i = 0
for c in code:
encoded_data.append(self.cdma.encode(user_bit[i], c)) # encode all the data using codes
i += 1
freq = self.cdma.encode_all(encoded_data)
response = [freq, code]
for c in code:
response.append(c)
data_size = sys.getsizeof(response)
packet = ""
str_response = str(response)
x = 0
while x < len(str_response): # sends the data in packets to the client
packet += str_response[x]
x = x + 1
if sys.getsizeof(packet) >= 3000:
self.send(packet)
packet = ""
if packet:
self.send(packet)
data = "finished" # indicates all data has been received
self.send(data)
self.log(self.username + "/" + str(self.client_id) + " has retrieved "
+ str(Message.num_of_messages(self.client_id)) + " unread messages!")
Message.delete_messages(self.client_id)
finish = self.receive()
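        # Note on the framing above: the stringified [freq, code, ...] payload is
        # streamed to the client in chunks whose in-memory size (sys.getsizeof)
        # reaches roughly 3000 bytes, followed by the sentinel string "finished";
        # the unread messages are then deleted and the handler blocks on receive()
        # until the client acknowledges.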
def udp(self):
"""
Asks for information that will allow the user to send a message using
UDP.
"""
self.log(self.username + "/" + str(self.client_id) + " is trying to send a direct message using UDP")
message = "Enter the recipient ip address: "
message2 = "Enter the recipient port number: "
message3 = "Enter the message: "
data = {"input": 1, "cache": 0, "udp": 1, "message": message, "message2": message2, "message3": message3}
self.send(data)
ip = self.receive()
port = self.receive()
self.log(self.username + "/" + str(self.client_id) + " has attempted to send a message using UDP to " + ip + "/"
+ str(port))
def broadcast(self):
"""
Broadcasts a message to everyone connected to the server including the sender. This message is stored
on the server until the client requests to see all messages.
"""
message = "Enter your message: "
data = {"input": 1, "cache": 0, "message": message}
self.send(data)
user_message = self.receive()
message = datetime.now().strftime('%Y-%m-%d %H:%M') + ": " + user_message + " (broadcast message from " \
+ self.username + ")"
for key, value in self.server.handlers.items(): # Adding message to message dictionary for everyone connected
Message.add_message(message, key)
message = "Message sent!"
data = {"input": 0, "cache": 0, "message": message}
self.send(data)
self.log(self.username + "/" + str(self.client_id) + " has broadcast a message!")
def create_chat_room(self):
"""
Implementation of option 6 where user can create a chat room
"""
self.log(self.username + " is creating a chatroom")
message = "Enter the new channel id: "
data = {"input": 1, "cache": 0, "message": message}
self.send(data)
channel_id = self.receive()
for active_id in ClientHandler.chat_rooms:
if channel_id == active_id.id:
message = "\nChannel with ID " + str(channel_id) + "is already active\n"
data = {"input": 0, "cache": 0, "message": message}
self.send(data)
return # insert logic for if channel id is active
chat_room = ChatRoom(channel_id, self.client_id)
chat_room.users.append(self.username)
ClientHandler.chat_rooms.append(chat_room)
message = ("Private key received from server and channel " + str(
channel_id) + " was successfully created!\n\n" +
"----------------------- Channel " + str(channel_id) + " ------------------------" +
"\n\nAll the data in this channel is encrypted\n\nGeneral Admin Guidelines:\n" +
"1. #" + self.username + " is the admin of this channel\n2. Type '#exit' to " +
"terminate the channel (only for admins)\n\nGeneral Chat Guidelines:\n" +
"1. Type #exit to exit from this channel.\n" +
"2. Use #<username> to send a private message to that user.\n\n" +
"Waiting for other users to join....\n")
data = {"input": 0, "cache": 0, "message": message}
self.send(data)
message = channel_id
data2 = {"pgpadmin": 1, "cache": 0, "message": message}
self.send(data2)
self.message_thread()
while True:
data = self.receive()
if "#exit" in data:
message = "\nClosing channel " + channel_id
data2 = {"input": 0, "cache": 0, "message": message}
ClientHandler.chat_rooms.remove(chat_room)
self.send(data2)
return
for user in chat_room.users:
test_message = "#" + user
if test_message in data:
data = self.username + " (private message)> " + data
if user in ChatRoom.messages:
ChatRoom.messages[user].append(data)
continue
else:
ChatRoom.messages[user] = []
ChatRoom.messages[user].append(data)
continue
else:
data = self.username + "> " + data
for user in chat_room.users:
if user == self.username:
continue
#if user not in chat_room.users:
# return
if user in ChatRoom.messages:
ChatRoom.messages[user].append(data)
else:
ChatRoom.messages[user] = []
ChatRoom.messages[user].append(data)
# message = ""
# data2 = {"input": 1, "cache": 0, "message": message}
# self.send(data2)
# check to see if channel id key and username
    def check_chat_room(self):
        # Polls the shared ChatRoom.messages dictionary and forwards any pending
        # chat-room messages to this client. The short sleep keeps this polling
        # thread from spinning at 100% CPU.
        while True:
            if self.username in ChatRoom.messages:
                for cr_message in ChatRoom.messages[self.username]:
                    self.send(cr_message)
                ChatRoom.messages[self.username].clear()
            time.sleep(0.1)
def message_thread(self):
Thread(target=self.check_chat_room, args=()).start()
def join_chat_room(self):
"""
Implementation of option 7 where user can join an active chat room
"""
test_message = ""
self.log(self.username + " is trying to join a chatroom")
active_room = False
message = "Enter the new channel id: "
data = {"input": 1, "cache": 0, "message": message}
self.send(data)
channel_id = self.receive()
for active_id in ClientHandler.chat_rooms:
if active_id.id == channel_id:
active_room = True
chat_room = active_id
if active_room is False:
message = "\nThis is not an active channel id. Please try again\n"
data = {"input": 0, "cache": 0, "message": message}
self.send(data)
return
chat_room.users.append(self.username)
message = "----------------------- Channel " + str(channel_id) + "------------------------\n" \
"All the data in this channel is encrypted\n" + \
self.username + " has just joined\n"
for user in chat_room.users:
if user == chat_room.admin:
print(user + " is the admin!\n")
else:
print(user + " is already on the server!\n")
message += "1. Type #exit to exit from this channel.\n" +\
"2. Use #<username> to send a private message to that user.\n\n" +\
"Waiting for other users to join....\n"
data = {"input": 0, "cache": 0, "message": message}
self.send(data)
message = ""
data2 = {"pgpadmin": 1, "cache": 0, "message": message}
self.send(data2)
self.message_thread()
while True:
data = self.receive()
private = False
if "#exit" in data: # change to bye later
message = "\nExiting channel " + channel_id
data2 = {"input": 0, "cache": 0, "message": message}
#for j in range(0, len(chat_room.users)):
#if self.username == chat_room.users[j]:
self.send(data2)
return
for user in chat_room.users:
test_message = "#" + user
if test_message in data:
                    data = self.username + " (private message)> " + data
                    if user in ChatRoom.messages:
                        ChatRoom.messages[user].append(data)
                    else:
                        ChatRoom.messages[user] = [data]
                    private = True
else:
if private is False:
data = self.username + "> " + data
#if user not in chat_room.users:
# return
for user in chat_room.users:
if user == self.username:
continue
if user in ChatRoom.messages:
ChatRoom.messages[user].append(data)
else:
ChatRoom.messages[user] = []
ChatRoom.messages[user].append(data)
# message = ""
# data2 = {"input": 1, "cache": 0, "message": message}
# self.send(data2)
def create_bot(self):
self.log("Creating bot!")
message = "Enter the name of your bot: "
data = {"input": 1, "cache": 0, "message": message}
self.send(data)
name = self.receive()
        bot = Bot(name, self.client_id)  # registered in self.bots further below
bot_message = "The disabled permissions for this bot are:\n" + \
"1. Welcome users right after they join a channel. \n" + \
"2. Show a warning to the users when they send words that are not allowed\n" + \
"3. Drop users from the channel after 3 warnings\n" + \
"4. Compute the response time of a message when the user request it\n" + \
"5. Inform the user when it has been inactive on the channel for more than 5 minutes.\n\n" + \
"Enter an integer to enable a set of permissions: "
data = {"input": 1, "cache": 0, "message": bot_message}
self.send(data)
permissions = self.receive()
bot.set_permission(permissions)
message = str(bot.name) + "'s Configuration:\n" +\
"\nToken: " + str(bot.token) +\
"\nPermissions Enabled: " + str(bot.permissions) +\
"\nStatus: Ready"
data = {"input": 0, "cache": 0, "message": message}
self.bots.append(bot)
self.send(data)
def run(self):
"""
Runs the client handler
"""
try:
self.log(self.username + " has connected\nClient ID: " + str(self.client_id))
menu = Menu()
while True:
menu.send_menu(self)
menu.get_option(self)
        except ConnectionResetError:
            self.log("\n" + self.username + " has disconnected\nClient ID: " + str(self.client_id))
            self.server.handlers.pop(self.client_id)
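# --- Illustrative usage sketch (not part of the original assignment code) ---
# Shows how a server's accept loop might hand every incoming connection to a
# ClientHandler running on its own thread. The `server` object and its
# attributes (`sock`, a listening TCP socket, and `handlers`, a dict keyed by
# client id) are assumptions made only for this example.
def _example_accept_loop(server):
    def _serve(connection, addr):
        handler = ClientHandler(server, connection, addr)
        server.handlers[handler.client_id] = handler
        handler.run()

    while True:
        connection, addr = server.sock.accept()  # blocks until a client connects
        Thread(target=_serve, args=(connection, addr), daemon=True).start()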
|
run_once.py
|
#!/usr/bin/env python
# From: https://gist.github.com/nfarrar/884c72ec107a00606a86
import random
from datetime import datetime
from common_once import *
from multiprocessing import Process
from time import sleep
from acs import flood_acs
from login import flood_login
from antivirus import flood_antivirus
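# The __main__ block below starts the three flood_* workers as separate
# processes, staggering their start-up by 0.33 s, and then waits for all of
# them to finish.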
if __name__ == "__main__":
args = parser.parse_args()
p1 = Process(target=flood_acs, args=(args,))
p1.start()
sleep(0.33)
p2 = Process(target=flood_login, args=(args,))
p2.start()
sleep(0.33)
p3 = Process(target=flood_antivirus, args=(args,))
p3.start()
p1.join()
p2.join()
p3.join()
print("Finished")
|
watcher.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..utils.nvsmi import get_gpu_process, get_gpu_util, get_gpu_info
import time
import os
from threading import Thread
class Watcher(object):
def __init__(self, ctx):
self.ctx = ctx
self.interval = 10
self.gpu_util = []
# gpu log file
self.gpus = self.ctx.args.devices or self.ctx.node.device.labels
if len(self.gpus) > 0:
fn = os.path.join(self.ctx.args.log_dir,
"{}.gpu.log".format(self.ctx.args.job_id))
os.makedirs(os.path.dirname(fn), exist_ok=True)
self.gpu_fd = open(fn, 'w')
else:
return
# start
self.proc = Thread(target=self.watch)
self.proc.daemon = True
self.proc.start()
def watch(self):
        if not self.gpus:
return
self._print_gpu_info()
util_key = "index,utilization_gpu,memory_total,memory_used,memory_free,timestamp"
self.gpu_fd.write(util_key)
self.gpu_fd.write('\n')
while not self.ctx.status.is_done():
self._save_gpu_log(util_key)
time.sleep(self.interval)
if hasattr(self, "gpu_fd"):
self.gpu_fd.close()
def _print_gpu_info(self):
try:
info_key = "index,uuid,driver_version,name,gpu_serial,display_active,display_mode"
self.gpu_fd.write(info_key)
self.gpu_fd.write('\n')
for line in get_gpu_info(self.gpus):
self.gpu_fd.write(line.str(info_key))
self.gpu_fd.write('\n')
self.gpu_fd.write('\n')
process_key = "pid,process_name,gpu_uuid,gpu_name,used_memory"
self.gpu_fd.write(process_key)
self.gpu_fd.write('\n')
for line in get_gpu_process(self.gpus):
self.gpu_fd.write(line.str(process_key))
self.gpu_fd.write('\n')
self.gpu_fd.write('\n')
self.gpu_fd.flush()
        except Exception:
self.ctx.log.error("save gpu info failed")
def _save_gpu_log(self, util_key):
try:
for line in get_gpu_util(self.gpus):
self.gpu_fd.write(line.str(util_key))
self.gpu_fd.write('\n')
self.gpu_fd.flush()
        except Exception:
self.ctx.log.error("save gpu log failed")
def stop(self):
if hasattr(self, "proc"):
self.proc.join()
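# --- Illustrative sketch (not part of the original file) ---
# A minimal reader for the per-GPU utilization section that watch() writes to
# "<job_id>.gpu.log". It assumes each utilization row is a comma-separated line
# matching the util_key header used above; the info/process sections written by
# _print_gpu_info() are skipped.
import csv


def read_gpu_util_log(path):
    """Return the utilization rows logged by watch() as a list of dicts."""
    util_key = "index,utilization_gpu,memory_total,memory_used,memory_free,timestamp"
    with open(path) as f:
        lines = f.read().splitlines()
    if util_key not in lines:
        return []
    start = lines.index(util_key) + 1
    reader = csv.DictReader(lines[start:], fieldnames=util_key.split(","))
    return [row for row in reader if row.get("index")]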
|
train.py
|
#!/usr/bin/env python
"""
Main training workflow
"""
from __future__ import division
import argparse
import glob
import os
import random
import signal
import time
import torch
from pytorch_pretrained_bert import BertConfig
import distributed
from models import data_loader, model_builder
from models.data_loader import load_dataset
from models.model_builder import Summarizer
from models.trainer import build_trainer
from others.logging import logger, init_logger
model_flags = ['hidden_size', 'ff_size', 'heads', 'inter_layers','encoder','ff_actv', 'use_interval','rnn_size']
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
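# str2bool is used below with argparse as `type=str2bool, nargs='?', const=True`,
# so a bare flag such as `-test_all` is read as True, while an explicit value
# like `-test_all false` is converted through str2bool.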
def multi_main(args):
""" Spawns 1 process per GPU """
init_logger()
nb_gpu = args.world_size
mp = torch.multiprocessing.get_context('spawn')
# Create a thread to listen for errors in the child processes.
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
# Train with multiprocessing.
procs = []
for i in range(nb_gpu):
device_id = i
procs.append(mp.Process(target=run, args=(args,
device_id, error_queue,), daemon=True))
procs[i].start()
logger.info(" Starting process pid: %d " % procs[i].pid)
error_handler.add_child(procs[i].pid)
for p in procs:
p.join()
def run(args, device_id, error_queue):
""" run process """
setattr(args, 'gpu_ranks', [int(i) for i in args.gpu_ranks])
try:
gpu_rank = distributed.multi_init(device_id, args.world_size, args.gpu_ranks)
print('gpu_rank %d' %gpu_rank)
if gpu_rank != args.gpu_ranks[device_id]:
raise AssertionError("An error occurred in \
Distributed initialization")
train(args,device_id)
except KeyboardInterrupt:
pass # killed by parent, do nothing
except Exception:
# propagate exception to parent process, keeping original traceback
import traceback
error_queue.put((args.gpu_ranks[device_id], traceback.format_exc()))
class ErrorHandler(object):
"""A class that listens for exceptions in children processes and propagates
the tracebacks to the parent process."""
def __init__(self, error_queue):
""" init error handler """
import signal
import threading
self.error_queue = error_queue
self.children_pids = []
self.error_thread = threading.Thread(
target=self.error_listener, daemon=True)
self.error_thread.start()
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
""" error handler """
self.children_pids.append(pid)
def error_listener(self):
""" error listener """
(rank, original_trace) = self.error_queue.get()
self.error_queue.put((rank, original_trace))
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, signalnum, stackframe):
""" signal handler """
for pid in self.children_pids:
os.kill(pid, signal.SIGINT) # kill children processes
(rank, original_trace) = self.error_queue.get()
msg = """\n\n-- Tracebacks above this line can probably
be ignored --\n\n"""
msg += original_trace
raise Exception(msg)
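# Note: the ErrorHandler above relies on signal.SIGUSR1, which only exists on
# POSIX platforms, so this multiprocess error-propagation path is unavailable on
# Windows.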
def wait_and_validate(args, device_id):
timestep = 0
if (args.test_all):
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
xent_lst = []
for i, cp in enumerate(cp_files):
step = int(cp.split('.')[-2].split('_')[-1])
xent = validate(args, device_id, cp, step)
xent_lst.append((xent, cp))
max_step = xent_lst.index(min(xent_lst))
if (i - max_step > 10):
break
xent_lst = sorted(xent_lst, key=lambda x: x[0])[:3]
logger.info('PPL %s' % str(xent_lst))
for xent, cp in xent_lst:
step = int(cp.split('.')[-2].split('_')[-1])
test(args, device_id, cp, step)
else:
while (True):
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
if (cp_files):
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if (not os.path.getsize(cp) > 0):
time.sleep(60)
continue
if (time_of_cp > timestep):
timestep = time_of_cp
step = int(cp.split('.')[-2].split('_')[-1])
validate(args, device_id, cp, step)
test(args, device_id, cp, step)
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
if (cp_files):
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if (time_of_cp > timestep):
continue
else:
time.sleep(300)
def validate(args, device_id, pt, step):
device = "cpu" if args.visible_gpus == '-1' else "cuda"
if (pt != ''):
test_from = pt
else:
test_from = args.test_from
logger.info('Loading checkpoint from %s' % test_from)
checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
print(args)
config = BertConfig.from_json_file(args.bert_config_path)
model = Summarizer(args, device, load_pretrained_bert=False, bert_config = config)
model.load_cp(checkpoint)
model.eval()
valid_iter =data_loader.Dataloader(args, load_dataset(args, 'valid', shuffle=False),
args.batch_size, device,
shuffle=False, is_test=False)
trainer = build_trainer(args, device_id, model, None)
stats = trainer.validate(valid_iter, step)
return stats.xent()
def test(args, device_id, pt, step):
device = "cpu" if args.visible_gpus == '-1' else "cuda"
if (pt != ''):
test_from = pt
else:
test_from = args.test_from
logger.info('Loading checkpoint from %s' % test_from)
checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
print(args)
config = BertConfig.from_json_file(args.bert_config_path)
model = Summarizer(args, device, load_pretrained_bert=False, bert_config = config)
model.load_cp(checkpoint)
model.eval()
test_iter =data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
args.batch_size, device,
shuffle=False, is_test=True)
trainer = build_trainer(args, device_id, model, None)
trainer.test(test_iter,step)
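# Note: baseline() below uses the module-level names `device` and `device_id`,
# which are only defined in the __main__ block at the bottom of this file, so it
# is meant to be called from that entry point.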
def baseline(args, cal_lead=False, cal_oracle=False):
test_iter =data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
args.batch_size, device,
shuffle=False, is_test=True)
trainer = build_trainer(args, device_id, None, None)
#
if (cal_lead):
trainer.test(test_iter, 0, cal_lead=True)
elif (cal_oracle):
trainer.test(test_iter, 0, cal_oracle=True)
def train(args, device_id):
init_logger(args.log_file)
device = "cpu" if args.visible_gpus == '-1' else "cuda"
logger.info('Device ID %d' % device_id)
logger.info('Device %s' % device)
torch.manual_seed(args.seed)
random.seed(args.seed)
torch.backends.cudnn.deterministic = True
if device_id >= 0:
torch.cuda.set_device(device_id)
torch.cuda.manual_seed(args.seed)
torch.manual_seed(args.seed)
random.seed(args.seed)
torch.backends.cudnn.deterministic = True
def train_iter_fct():
return data_loader.Dataloader(args, load_dataset(args, 'train', shuffle=True), args.batch_size, device,
shuffle=True, is_test=False)
model = Summarizer(args, device, load_pretrained_bert=True)
if args.train_from != '':
logger.info('Loading checkpoint from %s' % args.train_from)
checkpoint = torch.load(args.train_from,
map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
model.load_cp(checkpoint)
optim = model_builder.build_optim(args, model, checkpoint)
else:
optim = model_builder.build_optim(args, model, None)
logger.info(model)
trainer = build_trainer(args, device_id, model, optim)
trainer.train(train_iter_fct, args.train_steps)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-encoder", default='classifier', type=str, choices=['classifier','transformer','rnn','baseline'])
parser.add_argument("-mode", default='test', type=str, choices=['train','validate','test'])
parser.add_argument("-bert_data_path", default='../bert_data/cnndm/')
parser.add_argument("-model_path", default='../models/')
parser.add_argument("-result_path", default='../results/cnndm/')
parser.add_argument("-temp_dir", default='../temp/')
parser.add_argument("-bert_config_path", default='../bert_config_uncased_base.json')
# parser.add_argument("-batch_size", default=1000, type=int)
parser.add_argument("-batch_size", default=30000, type=int)
parser.add_argument("-use_interval", type=str2bool, nargs='?',const=True,default=True)
parser.add_argument("-hidden_size", default=128, type=int)
parser.add_argument("-ff_size", default=512, type=int)
parser.add_argument("-heads", default=4, type=int)
parser.add_argument("-inter_layers", default=2, type=int)
parser.add_argument("-rnn_size", default=512, type=int)
parser.add_argument("-param_init", default=0, type=float)
parser.add_argument("-param_init_glorot", type=str2bool, nargs='?',const=True,default=True)
parser.add_argument("-dropout", default=0.1, type=float)
parser.add_argument("-optim", default='adam', type=str)
parser.add_argument("-lr", default=1, type=float)
parser.add_argument("-beta1", default= 0.9, type=float)
parser.add_argument("-beta2", default=0.999, type=float)
parser.add_argument("-decay_method", default='', type=str)
parser.add_argument("-warmup_steps", default=8000, type=int)
parser.add_argument("-max_grad_norm", default=0, type=float)
parser.add_argument("-save_checkpoint_steps", default=5, type=int)
parser.add_argument("-accum_count", default=1, type=int)
parser.add_argument("-world_size", default=1, type=int)
parser.add_argument("-report_every", default=1, type=int)
parser.add_argument("-train_steps", default=1000, type=int)
parser.add_argument("-recall_eval", type=str2bool, nargs='?',const=True,default=False)
# parser.add_argument('-visible_gpus', default='-1', type=str)
parser.add_argument('-visible_gpus', default='0', type=str)
parser.add_argument('-gpu_ranks', default='0', type=str)
parser.add_argument('-log_file', default='../logs/cnndm.log')
parser.add_argument('-dataset', default='')
parser.add_argument('-seed', default=666, type=int)
# parser.add_argument("-test_all", type=str2bool, nargs='?',const=True,default=False)
parser.add_argument("-test_all", type=str2bool, nargs='?',const=True,default=True)
parser.add_argument("-test_from", default='../models/model_step_1.pt')
# parser.add_argument("-test_from", default='')
parser.add_argument("-train_from", default='')
parser.add_argument("-report_rouge", type=str2bool, nargs='?',const=True,default=True)
parser.add_argument("-block_trigram", type=str2bool, nargs='?', const=True, default=True)
args = parser.parse_args()
args.gpu_ranks = [int(i) for i in args.gpu_ranks.split(',')]
os.environ["CUDA_VISIBLE_DEVICES"] = args.visible_gpus
init_logger(args.log_file)
device = "cpu" if args.visible_gpus == '-1' else "cuda"
device_id = 0 if device == "cuda" else -1
if(args.world_size>1):
multi_main(args)
elif (args.mode == 'train'):
train(args, device_id)
elif (args.mode == 'validate'):
wait_and_validate(args, device_id)
elif (args.mode == 'lead'):
baseline(args, cal_lead=True)
elif (args.mode == 'oracle'):
baseline(args, cal_oracle=True)
elif (args.mode == 'test'):
cp = args.test_from
try:
step = int(cp.split('.')[-2].split('_')[-1])
except:
step = 0
test(args, device_id, cp, step)
|
test_cuda.py
|
# Owner(s): ["module: cuda"]
from itertools import repeat, chain, product
from typing import NamedTuple
import collections
import contextlib
import ctypes
import gc
import io
import pickle
import queue
import sys
import tempfile
import threading
import unittest
import torch
import torch.cuda
import torch.cuda.comm as comm
from torch.nn.parallel import scatter_gather
from torch.utils.checkpoint import checkpoint_sequential
from torch._six import inf, nan
from torch.testing._internal.common_methods_invocations import tri_tests_args, tri_large_tests_args, \
_compare_trilu_indices, _compare_large_trilu_indices
from torch.testing._internal.common_utils import TestCase, freeze_rng_state, run_tests, \
NO_MULTIPROCESSING_SPAWN, skipIfRocm, load_tests, IS_REMOTE_GPU, IS_SANDCASTLE, IS_WINDOWS, \
slowTest, skipCUDANonDefaultStreamIf, skipCUDAMemoryLeakCheckIf, TEST_WITH_ROCM, TEST_NUMPY, \
get_cycles_per_ms
from torch.testing._internal.autocast_test_lists import AutocastTestLists
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
# We cannot import TEST_CUDA and TEST_MULTIGPU from torch.testing._internal.common_cuda here,
# because if we do that, the TEST_CUDNN line from torch.testing._internal.common_cuda will be executed
# multiple times as well during the execution of this test suite, and it will
# cause CUDA OOM error on Windows.
TEST_CUDA = torch.cuda.is_available()
TEST_MULTIGPU = TEST_CUDA and torch.cuda.device_count() >= 2
if not TEST_CUDA:
print('CUDA not available, skipping tests', file=sys.stderr)
TestCase = object # noqa: F811
TEST_LARGE_TENSOR = TEST_CUDA
TEST_MEDIUM_TENSOR = TEST_CUDA
TEST_CUDNN = TEST_CUDA
TEST_BF16 = False
if TEST_CUDA:
torch.ones(1).cuda() # initialize cuda context
TEST_CUDNN = TEST_CUDA and (TEST_WITH_ROCM or
torch.backends.cudnn.is_acceptable(torch.tensor(1., device=torch.device('cuda:0'))))
TEST_LARGE_TENSOR = torch.cuda.get_device_properties(0).total_memory >= 12e9
TEST_MEDIUM_TENSOR = torch.cuda.get_device_properties(0).total_memory >= 6e9
TEST_BF16 = torch.cuda.is_bf16_supported()
def make_sparse_tensor(t, n, *sizes):
assert t.is_sparse
tensor = t()
i = tensor._indices()
i = i.new(len(sizes), n).copy_(
torch.cat([torch.LongTensor(1, n).random_(s) for s in sizes], 0))
v = tensor._values()
v = v.new(n).copy_(torch.randn(n))
return t(i, v, torch.Size(sizes)).coalesce()
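# Example (illustrative): make_sparse_tensor(torch.cuda.sparse.FloatTensor, 10, 3, 3)
# builds a coalesced 3x3 sparse CUDA tensor with up to 10 randomly placed values
# (duplicate indices are summed by coalesce()).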
_cycles_per_ms = None
class TestCuda(TestCase):
_do_cuda_memory_leak_check = True
_do_cuda_non_default_stream = True
FIFTY_MIL_CYCLES = 50000000
def setUp(self):
super(TestCuda, self).setUp()
self.autocast_lists = AutocastTestLists(torch.device('cuda:0'))
def tearDown(self):
del self.autocast_lists
super(TestCuda, self).tearDown()
def _check_memory_stat_consistency(self):
snapshot = torch.cuda.memory_snapshot()
expected_each_device = collections.defaultdict(lambda: collections.defaultdict(int))
for segment in snapshot:
expected = expected_each_device[segment["device"]]
pool_str = segment["segment_type"] + "_pool"
expected["segment.all.current"] += 1
expected["segment." + pool_str + ".current"] += 1
expected["allocated_bytes.all.current"] += segment["allocated_size"]
expected["allocated_bytes." + pool_str + ".current"] += segment["allocated_size"]
expected["reserved_bytes.all.current"] += segment["total_size"]
expected["reserved_bytes." + pool_str + ".current"] += segment["total_size"]
expected["active_bytes.all.current"] += segment["active_size"]
expected["active_bytes." + pool_str + ".current"] += segment["active_size"]
is_split = len(segment["blocks"]) > 1
for block in segment["blocks"]:
if block["state"] == "active_allocated":
expected["allocation.all.current"] += 1
expected["allocation." + pool_str + ".current"] += 1
if block["state"].startswith("active_"):
expected["active.all.current"] += 1
expected["active." + pool_str + ".current"] += 1
if block["state"] == "inactive" and is_split:
expected["inactive_split.all.current"] += 1
expected["inactive_split." + pool_str + ".current"] += 1
expected["inactive_split_bytes.all.current"] += block["size"]
expected["inactive_split_bytes." + pool_str + ".current"] += block["size"]
for device, expected in expected_each_device.items():
stats = torch.cuda.memory_stats(device)
for k, v in expected.items():
self.assertEqual(v, stats[k])
@staticmethod
def _test_memory_stats_generator(self, device=None, N=35):
if device is None:
device = torch.cuda.current_device()
m0 = torch.cuda.memory_allocated(device)
last_m_arr = [torch.cuda.memory_allocated(device)]
max_m_arr = [torch.cuda.max_memory_allocated(device)]
last_r_arr = [torch.cuda.memory_reserved(device)]
max_r_arr = [torch.cuda.max_memory_reserved(device)]
def alloc(*size):
with torch.cuda.device(device):
# NOTE: do **not** use methods that can have additional
# memory overhead, e.g., inplace random sampling methods.
# they can leave some memory occupied even after being
# deallocated, e.g., initialized RNG state, causing some
# memory checks below to fail.
return torch.cuda.FloatTensor(*size)
def assert_change(comp=1, empty_cache=False, reset_peak=False):
# comp > 0: increased
# comp = 0: equal
# comp < 0: decreased
new_m = torch.cuda.memory_allocated(device)
new_max_m = torch.cuda.max_memory_allocated(device)
if comp > 0:
self.assertGreater(new_m, last_m_arr[0])
elif comp < 0:
self.assertLess(new_m, last_m_arr[0])
else:
self.assertEqual(new_m, last_m_arr[0])
self.assertLessEqual(new_m, new_max_m)
self.assertGreaterEqual(new_max_m, max_m_arr[0])
last_m_arr[0] = new_m
max_m_arr[0] = new_max_m
new_r = torch.cuda.memory_reserved(device)
new_max_r = torch.cuda.max_memory_reserved(device)
# emptying cache may happen (due to allocation or empty_cache), so
# we can't assert new_c >= last_c
self.assertLessEqual(new_r, new_max_r)
self.assertGreaterEqual(new_max_r, max_r_arr[0])
last_r_arr[0] = new_r
max_r_arr[0] = new_max_r
if empty_cache:
torch.cuda.empty_cache()
new_r = torch.cuda.memory_reserved(device)
new_max_r = torch.cuda.max_memory_reserved(device)
self.assertLessEqual(new_r, last_r_arr[0])
self.assertLessEqual(new_r, new_max_r)
self.assertEqual(new_max_r, max_r_arr[0])
last_r_arr[0] = new_r
if reset_peak:
torch.cuda.reset_peak_memory_stats(device)
self.assertEqual(torch.cuda.memory_allocated(device), last_m_arr[0])
self.assertEqual(torch.cuda.max_memory_allocated(device), last_m_arr[0])
max_m_arr[0] = last_m_arr[0]
self.assertEqual(torch.cuda.memory_reserved(device), last_r_arr[0])
self.assertEqual(torch.cuda.max_memory_reserved(device), last_r_arr[0])
max_r_arr[0] = last_r_arr[0]
assert_change(0)
assert_change(0, reset_peak=True)
assert_change(0, empty_cache=True)
assert_change(0, reset_peak=True)
assert_change(0)
yield
tensors1 = [alloc(1), alloc(10, 20), alloc(200, 300, 2000)]
m1 = torch.cuda.memory_allocated(device)
assert_change(1)
yield
tensors2 = []
for i in range(1, int(N / 2) + 1):
# small ones
tensors2.append(alloc(i, i * 4))
assert_change(1)
yield
for i in range(5, int(N / 2) + 5):
# large ones
tensors2.append(alloc(i, i * 7, i * 9, i * 11))
assert_change(1, reset_peak=(i % 2 == 0))
yield
tensors2.append(alloc(0, 0, 0))
assert_change(0)
yield
permute = []
for i in torch.randperm(len(tensors2)):
permute.append(tensors2[i])
assert_change(0)
yield
del tensors2
assert_change(0)
yield
tensors2 = permute
assert_change(0)
yield
del permute
assert_change(0, reset_peak=True)
yield
for i in range(int(N / 2)):
x = tensors2[i].numel()
del tensors2[i]
assert_change(-x) # in case that tensors2[i] is empty
yield
for i in range(2, int(2 * N / 3) + 2):
tensors2.append(alloc(i, i * 3, i * 8))
assert_change(1)
yield
del tensors2
assert_change(-1, reset_peak=True)
assert_change(0)
self.assertEqual(torch.cuda.memory_allocated(device), m1)
yield True
del tensors1
assert_change(-1, reset_peak=True)
self.assertEqual(torch.cuda.memory_allocated(device), m0)
# test empty_cache and reset_peak
assert_change(0, empty_cache=True)
assert_change(0, reset_peak=True)
def test_cudart_register(self):
t = torch.ones(20)
self.assertFalse(t.is_pinned())
cudart = torch.cuda.cudart()
r = cudart.cudaHostRegister(t.data_ptr(), t.numel() * t.element_size(), 0)
self.assertEqual(r, 0)
self.assertTrue(t.is_pinned())
r = cudart.cudaHostUnregister(t.data_ptr())
self.assertEqual(r, 0)
self.assertFalse(t.is_pinned())
def test_memory_stats(self):
gc.collect()
torch.cuda.empty_cache()
for _ in self._test_memory_stats_generator(self):
self._check_memory_stat_consistency()
def test_memory_allocation(self):
gc.collect()
torch.cuda.empty_cache()
mem = None
size = 1
prev = 0
try:
prev = torch.cuda.memory_allocated()
mem = torch.cuda.caching_allocator_alloc(size)
self.assertGreater(torch.cuda.memory_allocated(), prev)
finally:
if mem is not None:
torch.cuda.caching_allocator_delete(mem)
self.assertEqual(torch.cuda.memory_allocated(), prev)
def test_check_error(self):
# Assert this call doesn't raise.
torch.cuda.check_error(0)
with self.assertRaisesRegex(torch.cuda.CudaError,
"out of memory|hipErrorOutOfMemory"):
torch.cuda.check_error(2)
def test_cuda_get_device_name(self):
# Testing the behaviour with None as an argument
current_device = torch.cuda.current_device()
current_device_name = torch.cuda.get_device_name(current_device)
device_name_None = torch.cuda.get_device_name(None)
self.assertEqual(current_device_name, device_name_None)
# Testing the behaviour for No argument
device_name_no_argument = torch.cuda.get_device_name()
self.assertEqual(current_device_name, device_name_no_argument)
def test_cuda_get_device_capability(self):
# Testing the behaviour with None as an argument
current_device = torch.cuda.current_device()
current_device_capability = torch.cuda.get_device_capability(current_device)
device_capability_None = torch.cuda.get_device_capability(None)
self.assertEqual(current_device_capability, device_capability_None)
# Testing the behaviour for No argument
device_capability_no_argument = torch.cuda.get_device_capability()
self.assertEqual(current_device_capability, device_capability_no_argument)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_memory_stats_multigpu(self):
# advance a generator with a end flag
def advance(gen, end):
if not end:
try:
next(gen)
except StopIteration:
end = True
return end
# interlace
torch.cuda.empty_cache()
gen0 = self._test_memory_stats_generator(self, device='cuda:0', N=35)
gen1 = self._test_memory_stats_generator(self, device=torch.device('cuda:1'), N=35)
end0 = end1 = False
while not (end0 and end1):
end0 = advance(gen0, end0)
end1 = advance(gen1, end1)
# semi-random order
torch.cuda.empty_cache()
gen0 = self._test_memory_stats_generator(self, device=0, N=35)
gen1 = self._test_memory_stats_generator(self, device=torch.device('cuda:1'), N=35)
end0 = end1 = False
while not (end0 and end1):
end0 = advance(gen0, end0)
if not end0:
gen1_max_times = torch.LongTensor(1).random_(0, 3)[0]
else:
gen1_max_times = inf
t = 0
while t < gen1_max_times and not end1:
end1 = advance(gen1, end1)
t += 1
def test_out_of_memory(self):
tensor = torch.zeros(1024, device='cuda')
with self.assertRaisesRegex(RuntimeError, "Tried to allocate 800000000.00 GiB"):
torch.empty(1024 * 1024 * 1024 * 800000000, dtype=torch.int8, device='cuda')
with self.assertRaisesRegex(RuntimeError, "Tried to allocate more than 1EB memory"):
torch.empty(1024 * 1024 * 1024 * 8000000000, dtype=torch.int8, device='cuda')
# ensure out of memory error doesn't disturb subsequent kernel
tensor.fill_(1)
self.assertTrue((tensor == 1).all())
def test_set_per_process_memory_fraction(self):
# test invalid fraction value.
with self.assertRaisesRegex(TypeError, "Invalid type"):
torch.cuda.set_per_process_memory_fraction(int(1))
with self.assertRaisesRegex(ValueError, "Invalid fraction value"):
torch.cuda.set_per_process_memory_fraction(-0.1)
with self.assertRaisesRegex(ValueError, "Invalid fraction value"):
torch.cuda.set_per_process_memory_fraction(2.0)
tensor = torch.zeros(1024, device='cuda')
torch.cuda.empty_cache()
total_memory = torch.cuda.get_device_properties(0).total_memory
torch.cuda.set_per_process_memory_fraction(0.5, 0)
# test 0.499 allocation is ok.
application = int(total_memory * 0.499) - torch.cuda.max_memory_reserved()
tmp_tensor = torch.empty(application, dtype=torch.int8, device='cuda')
del tmp_tensor
torch.cuda.empty_cache()
application = int(total_memory * 0.5)
# it will get OOM when try to allocate more than half memory.
with self.assertRaisesRegex(RuntimeError, "out of memory"):
torch.empty(application, dtype=torch.int8, device='cuda')
# ensure out of memory error doesn't disturb subsequent kernel
tensor.fill_(1)
self.assertTrue((tensor == 1).all())
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_autogpu(self):
x = torch.randn(5, 5).cuda()
y = torch.randn(5, 5).cuda()
self.assertEqual(x.get_device(), 0)
self.assertEqual(x.get_device(), 0)
with torch.cuda.device(1):
z = torch.randn(5, 5).cuda()
self.assertEqual(z.get_device(), 1)
q = x.add(y)
self.assertEqual(q.get_device(), 0)
w = torch.randn(5, 5).cuda()
self.assertEqual(w.get_device(), 1)
self.assertEqual(y.cuda().get_device(), 1)
z = z.cuda()
self.assertEqual(z.get_device(), 0)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_new(self):
x = torch.randn(3, 3).cuda()
self.assertEqual(x.new([0, 1, 2]).get_device(), 0)
self.assertEqual(x.new([0, 1, 2], device=1).get_device(), 1)
with torch.cuda.device(1):
self.assertEqual(x.new([0, 1, 2]).get_device(), 0)
self.assertEqual(x.new([0, 1, 2], device=1).get_device(), 1)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_copy_device(self):
x = torch.randn(5, 5).cuda()
with torch.cuda.device(1):
y = x.cuda()
self.assertEqual(y.get_device(), 1)
self.assertIs(y.cuda(), y)
z = y.cuda(0)
self.assertEqual(z.get_device(), 0)
self.assertIs(z.cuda(0), z)
x = torch.randn(5, 5)
with torch.cuda.device(1):
y = x.cuda()
self.assertEqual(y.get_device(), 1)
self.assertIs(y.cuda(), y)
z = y.cuda(0)
self.assertEqual(z.get_device(), 0)
self.assertIs(z.cuda(0), z)
def _test_copy_sync_current_stream(self, x, y):
x_plus_one = x + 1
s0 = torch.cuda.Stream(device=x.device)
s1 = torch.cuda.Stream(device=y.device)
s2 = torch.cuda.Stream(device=x.device)
s3 = torch.cuda.Stream(device=y.device)
# same dst stream different src streams
with torch.cuda.stream(s0):
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
with torch.cuda.stream(s1):
y.copy_(x_plus_one)
with torch.cuda.stream(s2), torch.cuda.stream(s1):
y.copy_(x)
s1.synchronize()
# The copy() is synchronized on the current streams of both src and dst.
# In the above test, the _sleep() op on s0 will not block the copy() on
# s2, but both copies are synchronized on s1 in the dst device. Hence,
# x is copied to y after x_plus_one is copied to y. If x and y are on
# the same device, both copy() ops are synchronized on s1.
self.assertEqual(y, x)
# same src stream different dst streams
with torch.cuda.stream(s1):
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
with torch.cuda.stream(s0):
y.copy_(x_plus_one)
with torch.cuda.stream(s3), torch.cuda.stream(s0):
y.copy_(x)
s0.synchronize()
# Similarly, both copy() ops are synchronized on s0.
self.assertEqual(y, x)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_copy_streams(self):
d0 = torch.device('cuda:0')
x0 = torch.zeros(5, 5, device=d0)
d1 = torch.device('cuda:1')
x1 = torch.zeros(5, 5, device=d1)
self._test_copy_sync_current_stream(x0, x1)
x2 = torch.zeros(5, 5, device=d0)
self._test_copy_sync_current_stream(x0, x2)
def test_copy_non_blocking(self):
def _test_copy_non_blocking(a, b):
event = torch.cuda.Event()
a.copy_(b, non_blocking=True)
event.record()
event.synchronize()
self.assertEqual(a, b)
# 10MB copies
x = torch.ones(10000000, dtype=torch.uint8).cuda()
y = torch.zeros(10000000, dtype=torch.uint8).pin_memory()
_test_copy_non_blocking(x, y)
x = torch.zeros(10000000, dtype=torch.uint8).pin_memory()
y = torch.ones(10000000, dtype=torch.uint8).cuda()
_test_copy_non_blocking(x, y)
# Test the case where the pinned data_ptr is not equal to the storage data_ptr.
x_base = torch.zeros(10000000, dtype=torch.uint8).pin_memory()
x = x_base[1:]
self.assertTrue(x.is_pinned())
self.assertTrue(x_base.is_pinned())
self.assertNotEqual(x_base.data_ptr(), x.data_ptr())
self.assertEqual(x_base.storage().data_ptr(), x.storage().data_ptr())
y = torch.ones(10000000 - 1, dtype=torch.uint8).cuda()
_test_copy_non_blocking(x, y)
def test_to_non_blocking(self):
stream = torch.cuda.current_stream()
def _test_to_non_blocking(a, non_blocking, dst):
torch.cuda.synchronize()
# Pushes an 0.1 second spin to stream so if the copy is non blocking,
# stream will almost surely be active when we query().
torch.cuda._sleep(int(100 * get_cycles_per_ms()))
b = a.to(device=dst, non_blocking=non_blocking)
self.assertEqual(stream.query(), not non_blocking)
stream.synchronize()
self.assertEqual(a, b)
self.assertTrue(b.is_pinned() == (non_blocking and dst == "cpu"))
for dst, try_non_blocking in product(("cuda", "cpu"), (True, False)):
# Creates source on the opposite device from destination.
src = torch.randn(1000000,
device="cuda" if dst == "cpu" else "cpu",
pin_memory=True if dst == "cuda" else False)
_test_to_non_blocking(src, try_non_blocking, dst)
def test_to_cpu_blocking_by_default(self):
src = torch.randn(1000000, device="cuda")
torch.cuda.synchronize()
torch.cuda._sleep(int(100 * get_cycles_per_ms()))
dst = src.to(device="cpu")
self.assertEqual(torch.cuda.current_stream().query(), True)
self.assertEqual(src, dst)
self.assertFalse(dst.is_pinned())
def test_serialization_array_with_storage(self):
x = torch.randn(5, 5).cuda()
y = torch.IntTensor(2, 5).fill_(0).cuda()
q = [x, y, x, y.storage()]
with tempfile.NamedTemporaryFile() as f:
torch.save(q, f)
f.seek(0)
q_copy = torch.load(f)
self.assertEqual(q_copy, q, atol=0, rtol=0)
q_copy[0].fill_(5)
self.assertEqual(q_copy[0], q_copy[2], atol=0, rtol=0)
self.assertTrue(isinstance(q_copy[0], torch.cuda.FloatTensor))
self.assertTrue(isinstance(q_copy[1], torch.cuda.IntTensor))
self.assertTrue(isinstance(q_copy[2], torch.cuda.FloatTensor))
self.assertTrue(isinstance(q_copy[3], torch.storage._TypedStorage))
self.assertTrue(isinstance(q_copy[3]._storage, torch.cuda._UntypedStorage))
q_copy[1].fill_(10)
self.assertEqual(q_copy[3], torch.cuda.IntStorage(10).fill_(10))
def test_cublas_allow_tf32_get_set(self):
orig = torch.backends.cuda.matmul.allow_tf32
self.assertEqual(torch._C._get_cublas_allow_tf32(), orig)
torch.backends.cuda.matmul.allow_tf32 = not orig
self.assertEqual(torch._C._get_cublas_allow_tf32(), not orig)
torch.backends.cuda.matmul.allow_tf32 = orig
def test_float32_matmul_precision_get_set(self):
self.assertEqual(torch.get_float32_matmul_precision(), 'highest')
        self.assertFalse(torch.backends.cuda.matmul.allow_tf32)
for p in ('medium', 'high'):
torch.set_float32_matmul_precision(p)
self.assertEqual(torch.get_float32_matmul_precision(), p)
            self.assertTrue(torch.backends.cuda.matmul.allow_tf32)
torch.set_float32_matmul_precision('highest')
self.assertEqual(torch.get_float32_matmul_precision(), 'highest')
        self.assertFalse(torch.backends.cuda.matmul.allow_tf32)
def test_cublas_allow_fp16_reduced_precision_reduction_get_set(self):
orig = torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction
self.assertEqual(torch._C._get_cublas_allow_fp16_reduced_precision_reduction(), orig)
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = not orig
self.assertEqual(torch._C._get_cublas_allow_fp16_reduced_precision_reduction(), not orig)
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = orig
def test_cudnn_allow_tf32_get_set(self):
with torch.backends.cudnn.flags(enabled=None, benchmark=None, deterministic=None, allow_tf32=False):
self.assertFalse(torch.backends.cudnn.allow_tf32)
with torch.backends.cudnn.flags(enabled=None, benchmark=None, deterministic=None, allow_tf32=True):
self.assertTrue(torch.backends.cudnn.allow_tf32)
def test_type_conversions(self):
x = torch.randn(5, 5)
self.assertIsInstance(x.float(), torch.FloatTensor)
self.assertIsInstance(x.cuda().double(), torch.cuda.DoubleTensor)
self.assertIsInstance(x.cuda().float(), torch.cuda.FloatTensor)
self.assertIsInstance(x.cuda().float().cpu(), torch.FloatTensor)
self.assertIsInstance(x.cuda().float().cpu().int(), torch.IntTensor)
y = x.storage()
self.assertIsInstance(y.float(), torch.FloatStorage)
self.assertIsInstance(y.cuda().double(), torch.cuda.DoubleStorage)
self.assertIsInstance(y.cuda().float(), torch.cuda.FloatStorage)
self.assertIsInstance(y.cuda().float().cpu(), torch.FloatStorage)
self.assertIsInstance(y.cuda().float().cpu().int(), torch.IntStorage)
@unittest.skip("was disabled due to not enough memory, but actually it always fail")
def test_arithmetic_large_tensor(self):
x = torch.empty(2**30, device='cuda')
x.fill_(1)
self.assertEqual(x.sum(), 2**30)
x += 1
self.assertEqual(x.sum(), 2**31)
x.fill_(1)
x -= 0.5
self.assertEqual(x.sum(), 2**29)
x.fill_(1)
x *= 2
self.assertEqual(x.sum(), 2**31)
x.fill_(1)
x /= 2
self.assertEqual(x.sum(), 2**29)
def test_gather_bool(self):
t = torch.tensor([[False, True], [True, True]], device='cuda')
self.assertEqual(torch.gather(t, 1, torch.tensor([[0, 0], [1, 0]], device='cuda')),
torch.tensor([[False, False], [True, True]], device='cuda'))
def test_torch_manual_seed_seeds_cuda_devices(self):
with freeze_rng_state():
x = torch.zeros(4, 4).float().cuda()
torch.manual_seed(2)
self.assertEqual(torch.cuda.initial_seed(), 2)
x.uniform_()
torch.manual_seed(2)
y = x.clone().uniform_()
self.assertEqual(x, y)
self.assertEqual(torch.cuda.initial_seed(), 2)
def test_manual_seed(self):
with freeze_rng_state():
x = torch.zeros(4, 4).float().cuda()
torch.cuda.manual_seed(2)
self.assertEqual(torch.cuda.initial_seed(), 2)
x.uniform_()
a = torch.bernoulli(torch.full_like(x, 0.5))
torch.cuda.manual_seed(2)
y = x.clone().uniform_()
b = torch.bernoulli(torch.full_like(x, 0.5))
self.assertEqual(x, y)
self.assertEqual(a, b)
self.assertEqual(torch.cuda.initial_seed(), 2)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_cat_autogpu(self):
x = torch.randn(4, 4).cuda(1)
y = torch.randn(4, 4).cuda(1)
z = torch.cat([x, y], 0)
self.assertEqual(z.get_device(), x.get_device())
@unittest.skipIf(torch.cuda.device_count() >= 10, "Loading a cuda:9 tensor")
def test_load_nonexistent_device(self):
# Setup: create a serialized file object with a 'cuda:9' restore location
tensor = torch.randn(2, device='cuda')
buf = io.BytesIO()
torch.save(tensor, buf)
# NB: this might not work in the future if serialization changes
buf = io.BytesIO(buf.getvalue().replace(b'cuda:0', b'cuda:9'))
msg = r'Attempting to deserialize object on CUDA device 9'
with self.assertRaisesRegex(RuntimeError, msg):
_ = torch.load(buf)
def test_specify_improper_device_name(self):
import os
fname = "tempfile.pt"
try:
with self.assertRaisesRegex(RuntimeError, "Invalid device string"):
torch.save([torch.nn.Parameter(torch.randn(10, 10))], fname,
_use_new_zipfile_serialization=True)
torch.load(fname, 'cuda0')
finally:
if os.path.exists(fname):
os.remove(fname)
def test_get_device_index(self):
from torch.cuda._utils import _get_device_index
with self.assertRaisesRegex(RuntimeError, "Invalid device string"):
_get_device_index('cuda0', optional=True)
with self.assertRaisesRegex(ValueError, "Expected a cuda device"):
cpu_device = torch.device('cpu')
_get_device_index(cpu_device, optional=True)
def test_serialization_array_with_empty(self):
x = [torch.randn(4, 4).cuda(), torch.cuda.FloatTensor()]
with tempfile.NamedTemporaryFile() as f:
torch.save(x, f)
f.seek(0)
x_copy = torch.load(f)
for original, copy in zip(x, x_copy):
self.assertEqual(copy, original)
self.assertIs(type(copy), type(original))
self.assertEqual(copy.get_device(), original.get_device())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_multigpu_serialization_remap(self):
x = [torch.randn(4, 4).cuda(0), torch.randn(4, 4).cuda(1)]
def gpu_remap(storage, location):
if location == 'cuda:1':
return storage.cuda(0)
with tempfile.NamedTemporaryFile() as f:
torch.save(x, f)
f.seek(0)
x_copy = torch.load(f, map_location=gpu_remap)
for original, copy in zip(x, x_copy):
self.assertEqual(copy, original)
self.assertIs(type(copy), type(original))
self.assertEqual(copy.get_device(), 0)
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_multigpu_serialization_remap_dict(self):
x = [torch.randn(4, 4).cuda(0), torch.randn(4, 4).cuda(1)]
with tempfile.NamedTemporaryFile() as f:
torch.save(x, f)
f.seek(0)
x_copy = torch.load(f, map_location={'cuda:1': 'cuda:0'})
for original, copy in zip(x, x_copy):
self.assertEqual(copy, original)
self.assertIs(type(copy), type(original))
self.assertEqual(copy.get_device(), 0)
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_multigpu_storage_clone(self):
x = torch.randn(4, 4, device='cuda:1').storage()
y = x.clone()
self.assertEqual(x.get_device(), y.get_device())
for t in ['byte', 'char', 'short', 'int', 'long', 'half', 'double']:
self.assertEqual(getattr(x, t)().get_device(), x.get_device())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_cuda_set_device(self):
x = torch.randn(5, 5)
with torch.cuda.device(1):
self.assertEqual(x.cuda().get_device(), 1)
torch.cuda.set_device(0)
self.assertEqual(x.cuda().get_device(), 0)
with torch.cuda.device(1):
self.assertEqual(x.cuda().get_device(), 1)
self.assertEqual(x.cuda().get_device(), 0)
torch.cuda.set_device(1)
self.assertEqual(x.cuda().get_device(), 0)
def test_cuda_synchronize(self):
torch.cuda.synchronize()
torch.cuda.synchronize('cuda')
torch.cuda.synchronize('cuda:0')
torch.cuda.synchronize(0)
torch.cuda.synchronize(torch.device('cuda:0'))
if TEST_MULTIGPU:
torch.cuda.synchronize('cuda:1')
torch.cuda.synchronize(1)
torch.cuda.synchronize(torch.device('cuda:1'))
with self.assertRaisesRegex(ValueError, "Expected a cuda device, but"):
torch.cuda.synchronize(torch.device("cpu"))
with self.assertRaisesRegex(ValueError, "Expected a cuda device, but"):
torch.cuda.synchronize("cpu")
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_current_stream(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
s0 = torch.cuda.current_stream()
s1 = torch.cuda.current_stream(device=1)
s2 = torch.cuda.current_stream(device=0)
self.assertEqual(d0, s0.device)
self.assertEqual(d1, s1.device)
self.assertEqual(d0, s2.device)
self.assertEqual(s0, s2)
with torch.cuda.device(d1):
s0 = torch.cuda.current_stream()
s1 = torch.cuda.current_stream(1)
s2 = torch.cuda.current_stream(d0)
self.assertEqual(d1, s0.device)
self.assertEqual(d1, s1.device)
self.assertEqual(d0, s2.device)
self.assertEqual(s0, s1)
with self.assertRaisesRegex(ValueError,
"Expected a cuda device, but got: cpu"):
torch.cuda.current_stream(torch.device('cpu'))
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
@skipCUDANonDefaultStreamIf(True)
def test_default_stream(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
with torch.cuda.device(d0):
s0 = torch.cuda.default_stream()
with torch.cuda.device(d1):
s1 = torch.cuda.default_stream()
s2 = torch.cuda.default_stream(device=0)
s3 = torch.cuda.default_stream(d1)
self.assertEqual(d0, s0.device)
self.assertEqual(d1, s1.device)
self.assertEqual(d0, s2.device)
self.assertEqual(d1, s3.device)
self.assertEqual(s0, s2)
self.assertEqual(s1, s3)
with torch.cuda.device(d0):
self.assertEqual(torch.cuda.current_stream(), s0)
with torch.cuda.device(d1):
self.assertEqual(torch.cuda.current_stream(), s1)
with self.assertRaisesRegex(ValueError,
"Expected a cuda device, but got: cpu"):
torch.cuda.default_stream(torch.device('cpu'))
@skipCUDANonDefaultStreamIf(True)
def test_streams(self):
default_stream = torch.cuda.current_stream()
user_stream = torch.cuda.Stream()
self.assertEqual(torch.cuda.current_stream(), default_stream)
self.assertNotEqual(default_stream, user_stream)
self.assertEqual(default_stream.cuda_stream, 0)
self.assertNotEqual(user_stream.cuda_stream, 0)
with torch.cuda.stream(user_stream):
self.assertEqual(torch.cuda.current_stream(), user_stream)
self.assertTrue(user_stream.query())
tensor1 = torch.ByteTensor(5).pin_memory()
tensor2 = tensor1.cuda(non_blocking=True) + 1
default_stream.synchronize()
self.assertTrue(default_stream.query())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_stream_event_device(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
e0 = torch.cuda.Event()
self.assertEqual(None, e0.device)
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
s0.record_event(e0)
with torch.cuda.device(d1):
s1 = torch.cuda.Stream()
e1 = s1.record_event()
self.assertEqual(s0.device, torch.device('cuda:0'))
self.assertEqual(e0.device, torch.device('cuda:0'))
self.assertEqual(s1.device, torch.device('cuda:1'))
self.assertEqual(e1.device, torch.device('cuda:1'))
def test_stream_event_repr(self):
s = torch.cuda.current_stream()
self.assertTrue("torch.cuda.Stream" in s.__repr__())
e = torch.cuda.Event()
self.assertTrue("torch.cuda.Event" in e.__repr__())
s.record_event(e)
self.assertTrue("torch.cuda.Event" in e.__repr__())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_stream_context(self):
s0 = torch.cuda.current_stream()
s1 = torch.cuda.Stream(device=1)
s2 = torch.cuda.Stream(device=0)
with torch.cuda.device(s1.device):
prev_stream_on_cuda1 = torch.cuda.current_stream()
self.assertEqual(torch.cuda.current_stream(), s0)
self.assertEqual(0, torch.cuda.current_device())
with torch.cuda.stream(s1):
self.assertEqual(torch.cuda.current_stream(), s1)
self.assertEqual(1, torch.cuda.current_device())
with torch.cuda.stream(s2):
self.assertEqual(torch.cuda.current_stream(), s2)
self.assertEqual(0, torch.cuda.current_device())
with torch.cuda.stream(s0):
self.assertEqual(torch.cuda.current_stream(), s0)
self.assertEqual(0, torch.cuda.current_device())
self.assertEqual(torch.cuda.current_stream(), s2)
self.assertEqual(0, torch.cuda.current_device())
self.assertEqual(torch.cuda.current_stream(), s1)
self.assertEqual(1, torch.cuda.current_device())
with torch.cuda.device(s1.device):
self.assertEqual(prev_stream_on_cuda1, torch.cuda.current_stream())
self.assertEqual(torch.cuda.current_stream(), s0)
self.assertEqual(0, torch.cuda.current_device())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_streams_multi_gpu(self):
default_stream = torch.cuda.current_stream()
self.assertEqual(default_stream.device, torch.device('cuda:0'))
stream = torch.cuda.Stream(device=1)
self.assertEqual(stream.device, torch.device('cuda:1'))
with torch.cuda.device(1):
self.assertEqual(
torch.cuda.current_stream().device, torch.device('cuda:1'))
self.assertNotEqual(torch.cuda.current_stream(), default_stream)
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_streams_multi_gpu_query(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
torch.cuda.synchronize(d0)
torch.cuda.synchronize(d1)
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
with torch.cuda.device(d1):
s1 = torch.cuda.current_stream()
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
self.assertTrue(s0.query())
self.assertFalse(s1.query())
with torch.cuda.device(d0):
self.assertTrue(s0.query())
self.assertFalse(s1.query())
with torch.cuda.device(d1):
self.assertTrue(s0.query())
self.assertFalse(s1.query())
# deliberately using a different device
with torch.cuda.device(d0):
s1.synchronize()
self.assertTrue(s0.query())
self.assertTrue(s1.query())
with torch.cuda.device(d0):
self.assertTrue(s0.query())
self.assertTrue(s1.query())
with torch.cuda.device(d1):
self.assertTrue(s0.query())
self.assertTrue(s1.query())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_streams_multi_gpu_eq(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
s1 = torch.cuda.current_stream()
with torch.cuda.device(d1):
s2 = torch.cuda.current_stream()
s3 = torch.cuda.current_stream()
self.assertTrue(s0 == s0)
self.assertTrue(s0 == s1)
self.assertTrue(s2 == s2)
self.assertTrue(s2 == s3)
self.assertFalse(s0 == s2)
self.assertFalse(s1 == s3)
self.assertEqual(s0.device, s1.device)
self.assertEqual(s0.cuda_stream, s1.cuda_stream)
self.assertEqual(s2.device, s3.device)
self.assertEqual(s2.cuda_stream, s3.cuda_stream)
self.assertNotEqual(s0.device, s3.device)
self.assertEqual(hash(s0), hash(s1))
self.assertEqual(hash(s2), hash(s3))
self.assertNotEqual(hash(s0), hash(s3))
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_streams_priority(self):
low, high = torch.cuda.Stream.priority_range()
s0 = torch.cuda.Stream(device=0, priority=low)
self.assertEqual(low, s0.priority)
self.assertEqual(torch.device('cuda:0'), s0.device)
s1 = torch.cuda.Stream(device=1, priority=high)
self.assertEqual(high, s1.priority)
self.assertEqual(torch.device('cuda:1'), s1.device)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_tensor_device(self):
self.assertEqual(torch.cuda.FloatTensor(1).get_device(), 0)
self.assertEqual(torch.cuda.FloatTensor(1, device=1).get_device(), 1)
with torch.cuda.device(1):
self.assertEqual(torch.cuda.FloatTensor(1).get_device(), 1)
self.assertEqual(torch.cuda.FloatTensor(1, device=0).get_device(), 0)
self.assertEqual(torch.cuda.FloatTensor(1, device=None).get_device(), 1)
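    # query() should be False while recorded work is still pending and True after
    # synchronize(); elapsed_time between two recorded events should be positive.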
def test_events(self):
stream = torch.cuda.current_stream()
event = torch.cuda.Event(enable_timing=True)
self.assertTrue(event.query())
start_event = torch.cuda.Event(enable_timing=True)
stream.record_event(start_event)
torch.cuda._sleep(int(50 * get_cycles_per_ms()))
stream.record_event(event)
self.assertFalse(event.query())
event.synchronize()
self.assertTrue(event.query())
self.assertGreater(start_event.elapsed_time(event), 0)
@staticmethod
def _stream_synchronize(self, spin_time_cycles):
s = torch.cuda.current_stream()
e_tik = torch.cuda.Event(enable_timing=True)
e_tok = torch.cuda.Event(enable_timing=True)
e_tik.record(s)
torch.cuda._sleep(spin_time_cycles)
e_tok.record(s)
s.synchronize()
self.assertTrue(s.query())
        # not necessary to check e_tik and e_tok, as elapsed_time would throw
        # an exception otherwise.
return e_tik.elapsed_time(e_tok)
@staticmethod
def _event_synchronize(self, spin_time_cycles):
s = torch.cuda.current_stream()
e_tik = torch.cuda.Event(enable_timing=True)
e_tok = torch.cuda.Event(enable_timing=True)
e_tik.record(s)
torch.cuda._sleep(spin_time_cycles)
s.record_event(e_tok)
e_tok.synchronize()
self.assertTrue(s.query())
        # not necessary to check e_tik and e_tok, as elapsed_time would throw
        # an exception otherwise.
return e_tik.elapsed_time(e_tok)
@staticmethod
def _event_wait(self, spin_time_cycles):
s0 = torch.cuda.current_stream()
s1 = torch.cuda.Stream()
e_tik = torch.cuda.Event(blocking=True, enable_timing=True)
e_tok = torch.cuda.Event(blocking=True, enable_timing=True)
e_tik.record(s0)
torch.cuda._sleep(spin_time_cycles - 10)
e_sync = torch.cuda.Event(blocking=True)
e_sync.record()
e_sync.wait(s1)
with torch.cuda.stream(s1):
torch.cuda._sleep(10)
s1.synchronize()
e_tok.record()
e_tok.synchronize()
self.assertTrue(s0.query())
self.assertTrue(s1.query())
self.assertTrue(e_sync.query())
        # not necessary to check e_tik and e_tok, as elapsed_time would throw
        # an exception otherwise.
return e_tik.elapsed_time(e_tok)
@staticmethod
def _test_stream_event_nogil(self, sync_func, p2c, c2p):
with torch.cuda.device('cuda:1'):
c2p.put(0)
p2c.get()
c2p.put(sync_func(self, TestCuda.FIFTY_MIL_CYCLES))
# Skip the test for ROCm as per https://github.com/pytorch/pytorch/issues/53190
@skipIfRocm
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_stream_event_nogil(self):
for sync_func in [TestCuda._stream_synchronize,
TestCuda._event_synchronize,
TestCuda._event_wait]:
p2c = queue.Queue()
c2p = queue.Queue()
e_tik = torch.cuda.Event(enable_timing=True)
e_tok = torch.cuda.Event(enable_timing=True)
t = threading.Thread(
target=TestCuda._test_stream_event_nogil,
args=(self, sync_func, p2c, c2p))
t.daemon = True
t.start()
c2p.get()
with torch.cuda.device('cuda:0'):
e_tik.record()
p2c.put(0)
parent_time = sync_func(self, TestCuda.FIFTY_MIL_CYCLES)
child_time = c2p.get()
e_tok.record()
e_tok.synchronize()
total_time = e_tik.elapsed_time(e_tok)
# Without GIL, synchronizations in parent and child threads can
# overlap. The total execution time should be a little bit longer
# than spinning fifty million cycles and much shorter than twice of
# that. However, testing absolute execution time is not reliable as
# it may vary on different hardware in different environments.
            # Therefore, this test uses a relative comparison, checking that the
            # sum of the parent and child threads' execution times exceeds the
            # real execution time by at least 40%.
self.assertGreater(parent_time + child_time, total_time * 1.4)
# This test is flaky for ROCm, see issue #62602
@skipIfRocm
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_events_wait(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
torch.cuda.synchronize(d0)
torch.cuda.synchronize(d1)
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
e0 = torch.cuda.Event()
s0.record_event(e0)
with torch.cuda.device(d1):
s1 = torch.cuda.current_stream()
self.assertFalse(s0.query())
self.assertTrue(s1.query())
s1.wait_event(e0)
s1.synchronize()
self.assertTrue(e0.query())
self.assertTrue(s0.query())
self.assertTrue(s1.query())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_events_multi_gpu_query(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
e0 = s0.record_event()
s0.synchronize()
with torch.cuda.device(d1):
s1 = torch.cuda.current_stream()
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
e1 = s1.record_event()
self.assertTrue(e0.query())
self.assertFalse(e1.query())
with torch.cuda.device(d0):
self.assertTrue(e0.query())
self.assertFalse(e1.query())
with torch.cuda.device(d1):
self.assertTrue(e0.query())
self.assertFalse(e1.query())
# deliberately using a different device
with torch.cuda.device(d0):
e1.synchronize()
self.assertTrue(e0.query())
self.assertTrue(e1.query())
with torch.cuda.device(d0):
self.assertTrue(e0.query())
self.assertTrue(e1.query())
with torch.cuda.device(d1):
self.assertTrue(e0.query())
self.assertTrue(e1.query())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
@skipIfRocm
def test_events_multi_gpu_elapsed_time(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
e0 = torch.cuda.Event(enable_timing=True)
torch.cuda._sleep(10)
s0.record_event(e0)
with torch.cuda.device(d1):
s1 = torch.cuda.current_stream()
e1 = torch.cuda.Event(enable_timing=True)
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
s1.record_event(e1)
e0.synchronize()
e1.synchronize()
with torch.cuda.device(d0):
with self.assertRaises(RuntimeError):
self.assertGreater(e0.elapsed_time(e1), 0)
with torch.cuda.device(d1):
with self.assertRaises(RuntimeError):
self.assertGreater(e0.elapsed_time(e1), 0)
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
e2 = torch.cuda.Event(enable_timing=True)
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
s0.record_event(e2)
s0.synchronize()
self.assertGreater(e0.elapsed_time(e2), 0)
# deliberately calling from a different device
with torch.cuda.device(d1):
self.assertGreater(e0.elapsed_time(e2), 0)
# XXX: this test only fails with hip-clang. revisit this once the dust has settled there.
@skipIfRocm
def test_record_stream(self):
cycles_per_ms = get_cycles_per_ms()
t = torch.FloatTensor([1, 2, 3, 4]).pin_memory()
result = torch.cuda.FloatTensor(t.size())
stream = torch.cuda.Stream()
ptr = [None]
# Performs the CPU->GPU copy in a background stream
def perform_copy():
with torch.cuda.stream(stream):
tmp = t.cuda(non_blocking=True)
ptr[0] = tmp.data_ptr()
torch.cuda.current_stream().wait_stream(stream)
tmp.record_stream(torch.cuda.current_stream())
torch.cuda._sleep(int(50 * cycles_per_ms)) # delay the copy
result.copy_(tmp)
perform_copy()
with torch.cuda.stream(stream):
tmp2 = torch.cuda.FloatTensor(t.size())
tmp2.zero_()
            self.assertNotEqual(tmp2.data_ptr(), ptr[0], msg='allocation re-used too soon')
self.assertEqual(result.tolist(), [1, 2, 3, 4])
# Check that the block will be re-used after the main stream finishes
torch.cuda.current_stream().synchronize()
with torch.cuda.stream(stream):
tmp3 = torch.cuda.FloatTensor(t.size())
self.assertEqual(tmp3.data_ptr(), ptr[0], msg='allocation not re-used')
def test_record_stream_on_shifted_view(self):
# See issue #27366
        # This test detects unexpected block reallocation. For a reliable test,
        # the stream used to allocate tensors is isolated. The allocator will not
        # reuse free blocks that were allocated from another stream.
stream_alloc = torch.cuda.Stream()
with torch.cuda.stream(stream_alloc):
base = torch.cuda.FloatTensor([10, 10])
# Record another stream on a shifted view tensor.
view = base[5:]
assert view.storage_offset() > 0
stream_record = torch.cuda.Stream()
with torch.cuda.stream(stream_record):
torch.cuda._sleep(int(50 * get_cycles_per_ms()))
view.record_stream(stream_record)
# Delete those tensors to make the block free soon.
data_ptr = base.data_ptr()
del base, view
# A new tensor should not be allocated to the block above.
stream_alloc.synchronize()
with torch.cuda.stream(stream_alloc):
try_realloc = torch.cuda.FloatTensor([10, 10])
self.assertNotEqual(try_realloc.data_ptr(), data_ptr)
@contextlib.contextmanager
def _get_external_stream(self, device):
cudart = torch.cuda.cudart()
stream = ctypes.c_ulonglong(0)
stream_p = ctypes.POINTER(ctypes.c_void_p)(stream)
stream_p_int = ctypes.cast(stream_p, ctypes.c_void_p).value
with device:
try:
out = cudart.cudaStreamCreate(stream_p_int)
self.assertEqual(out, 0)
self.assertNotEqual(stream.value, 0)
yield stream.value
finally:
out = cudart.cudaStreamDestroy(stream.value)
self.assertEqual(out, 0)
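    # Wrapping a raw cudaStream_t handle in torch.cuda.ExternalStream should preserve both
    # the handle value and the device it was created on.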
@skipIfRocm
def test_external_streams(self):
device = torch.cuda.device(0)
with self._get_external_stream(device) as stream_v:
ext_stream = torch.cuda.ExternalStream(stream_v)
self.assertEqual(stream_v, ext_stream.cuda_stream)
self.assertEqual(ext_stream.device.index, device.idx)
@skipIfRocm
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_external_streams_multi_device(self):
device = torch.cuda.device(1)
with self._get_external_stream(device) as stream_v:
ext_stream = torch.cuda.ExternalStream(
stream_v, device=device)
self.assertEqual(stream_v, ext_stream.cuda_stream)
self.assertEqual(ext_stream.device.index, device.idx)
def test_noncontiguous_pinned_memory(self):
# See issue #3266
x = torch.arange(0, 10).view((2, 5))
self.assertEqual(x.t(), x.t().pin_memory())
def test_caching_pinned_memory(self):
cycles_per_ms = get_cycles_per_ms()
# check that allocations are re-used after deletion
t = torch.FloatTensor([1]).pin_memory()
ptr = t.data_ptr()
del t
t = torch.FloatTensor([1]).pin_memory()
self.assertEqual(t.data_ptr(), ptr, msg='allocation not reused')
# check that the allocation is not re-used if it's in-use by a copy
gpu_tensor = torch.cuda.FloatTensor([0])
torch.cuda._sleep(int(50 * cycles_per_ms)) # delay the copy
gpu_tensor.copy_(t, non_blocking=True)
del t
t = torch.FloatTensor([1]).pin_memory()
self.assertNotEqual(t.data_ptr(), ptr, msg='allocation re-used too soon')
self.assertEqual(list(gpu_tensor), [1])
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_caching_pinned_memory_multi_gpu(self):
# checks that the events preventing pinned memory from being re-used
# too early are recorded on the correct GPU
cycles_per_ms = get_cycles_per_ms()
t = torch.FloatTensor([1]).pin_memory()
ptr = t.data_ptr()
gpu_tensor0 = torch.cuda.FloatTensor([0], device=0)
gpu_tensor1 = torch.cuda.FloatTensor([0], device=1)
with torch.cuda.device(1):
torch.cuda._sleep(int(50 * cycles_per_ms)) # delay the copy
gpu_tensor1.copy_(t, non_blocking=True)
del t
t = torch.FloatTensor([2]).pin_memory()
self.assertNotEqual(t.data_ptr(), ptr, msg='allocation re-used too soon')
with torch.cuda.device(0):
gpu_tensor0.copy_(t, non_blocking=True)
self.assertEqual(gpu_tensor1[0], 1)
self.assertEqual(gpu_tensor0[0], 2)
def test_caching_allocator_record_stream_oom(self):
"""allocations delayed by a record_stream call should still be freed on
an out-of-memory in cuda_malloc_retry. see issue #19219"""
stream = torch.cuda.Stream()
with torch.cuda.stream(stream):
y = torch.zeros(40 * 1024 * 1024, device='cuda')
for _ in range(100):
x = torch.empty(40 * 1024 * 1024, device='cuda')
with torch.cuda.stream(stream):
y += x
# delays re-use of `x` until after all operations in `stream`
x.record_stream(stream)
del x
# we've made a mess by allocating up to the device capacity. free any
# cached blocks in case it affects future tests.
torch.cuda.empty_cache()
# Tests for historic illegal memory access, see #17040.
def test_reduction_gpu_memory_accessing(self):
x = torch.ones(512, 8, dtype=torch.float32, device='cuda')
torch.sum(x, 0)
def test_sum_fp16(self):
x = torch.zeros(10, device='cuda', dtype=torch.float16)
self.assertEqual(x.sum(), 0)
x = torch.ones(65504, device='cuda', dtype=torch.float16)
self.assertEqual(x.sum(), 65504)
self.assertEqual(x.sum(dtype=torch.float32), 65504)
x = torch.ones(65536, device='cuda', dtype=torch.float16)
self.assertEqual(x.sum(dtype=torch.float32), 65536)
a = torch.zeros(1203611).bernoulli_(0.0005)
x = a.to(device='cuda', dtype=torch.float16)
self.assertEqual(x.sum().item(), a.sum().item())
a = torch.zeros(100, 121, 80).bernoulli_(0.0005)
x = a.to(device='cuda', dtype=torch.float16)
self.assertEqual(x.sum((0, 2)).float().cpu(), a.sum((0, 2)))
def test_mean_fp16(self):
x = torch.ones(65536, device='cuda', dtype=torch.float16)
self.assertEqual(x.mean(), 1)
x = torch.ones(65536, device='cuda', dtype=torch.float16)
self.assertEqual(x.mean(dtype=torch.float32), 1)
def test_prod_large(self):
# tests global reduction (should_global_reduce = true) in case of non-zero identity element
x = torch.ones(240000, device='cuda', dtype=torch.float32)
self.assertEqual(x.prod(), 1)
# test for complex types. Note 240k is divisible by 4
for dtype in [torch.cfloat, torch.cdouble]:
x = torch.ones(240000, device='cuda', dtype=dtype) * (0 + 1j)
self.assertEqual(x.prod(), 1)
def test_multinomial_ext(self):
# Test two corner cases from older PyTorch (Issue #4858)
freqs = torch.cuda.FloatTensor([
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.03178183361887932, 0.027680952101945877, 0.033176131546497345,
0.046052902936935425, 0.07742464542388916, 0.11543981730937958,
0.14148041605949402, 0.15784293413162231, 0.13180233538150787,
0.08271478116512299, 0.049702685326337814, 0.027557924389839172,
0.018125897273421288, 0.011851548217236996, 0.010252203792333603,
0.007422595750540495, 0.005372154992073774, 0.0045109698548913,
0.0036087757907807827, 0.0035267581697553396, 0.0018864056328311563,
0.0024605290964245796, 0.0022964938543736935, 0.0018453967059031129,
0.0010662291897460818, 0.0009842115687206388, 0.00045109697384759784,
0.0007791675161570311, 0.00020504408166743815, 0.00020504408166743815,
0.00020504408166743815, 0.00012302644609007984, 0.0,
0.00012302644609007984, 4.100881778867915e-05, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0])
torch.cuda.manual_seed(11042)
sample = torch.multinomial(freqs, 1000, True)
self.assertNotEqual(freqs[sample].min(), 0)
p = torch.zeros(3421, 2, device="cuda", dtype=torch.float)
p[:, 1] = 1
torch.cuda.manual_seed(5214)
r = torch.multinomial(p, 1)
self.assertNotEqual(r.min().item(), 0)
# test corner case from Issue #13867
torch.cuda.manual_seed(33)
probs = torch.randn(1000000, device='cuda').clamp(min=0) * 3e-5
samples = probs.multinomial(1000000, replacement=True)
self.assertGreater(probs[samples].min().item(), 0)
def _spawn_test_multinomial_invalid_probs_cuda(self, probs):
import subprocess
try:
p = subprocess.Popen([sys.executable, '-c', f"""\
import sys
import torch
from torch._six import inf, nan
try:
with torch.random.fork_rng(devices=[0]):
torch.multinomial(torch.tensor({probs}).to('cuda'), 2, replacement=True)
torch.cuda.synchronize()
sys.exit(-1) # Should not be reached
except RuntimeError as e:
sys.exit(-2)
"""], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
out, err = p.communicate(timeout=10)
p.wait(timeout=10)
except subprocess.TimeoutExpired as e:
p.kill()
out, err = p.communicate()
expected_messages = [
'device-side assert triggered', # CUDA
'Assertion', # CUDA
'HSA_STATUS_ERROR_EXCEPTION', # ROCm
'Device-side assertion' # ROCm
]
self.assertTrue(any([msg in out or msg in err for msg in expected_messages]))
@slowTest
@unittest.skipIf(TEST_WITH_ROCM, "ROCm doesn't support device side asserts")
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
def test_multinomial_invalid_probs_cuda(self):
self._spawn_test_multinomial_invalid_probs_cuda([1., -1., 1.])
self._spawn_test_multinomial_invalid_probs_cuda([1., inf, 1.])
self._spawn_test_multinomial_invalid_probs_cuda([1., -inf, 1.])
self._spawn_test_multinomial_invalid_probs_cuda([1., 1., nan])
@slowTest
@unittest.skipIf(not TEST_LARGE_TENSOR, "not enough memory")
def test_huge_index(self):
src = torch.empty(15000000, 45, device='cuda', dtype=torch.long).random_(0, 2**22)
idx = torch.randperm(src.shape[0], device='cuda')
res = src[idx]
res_cpu = src.cpu()[idx.cpu()]
self.assertEqual(res.cpu(), res_cpu)
def test_min_max_inits(self):
# Testing if THC_reduceAll received the correct index initialization.
# This affects the result of THC_reduceAll operations at extreme values
x = torch.cuda.ByteTensor([0])
y = torch.cuda.ByteTensor([255])
expected = torch.cuda.LongTensor([0])[0]
_, v = x.max(dim=0)
self.assertEqual(v, expected)
_, v = y.min(dim=0)
self.assertEqual(v, expected)
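    # Restoring the saved per-device RNG states should reproduce the same random samples on
    # every GPU.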
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_get_set_rng_state_all(self):
states = torch.cuda.get_rng_state_all()
before0 = torch.cuda.FloatTensor(100, device=0).normal_()
before1 = torch.cuda.FloatTensor(100, device=1).normal_()
torch.cuda.set_rng_state_all(states)
after0 = torch.cuda.FloatTensor(100, device=0).normal_()
after1 = torch.cuda.FloatTensor(100, device=1).normal_()
self.assertEqual(before0, after0, atol=0, rtol=0)
self.assertEqual(before1, after1, atol=0, rtol=0)
def test_nvtx(self):
# Just making sure we can see the symbols
torch.cuda.nvtx.range_push("foo")
torch.cuda.nvtx.mark("bar")
torch.cuda.nvtx.range_pop()
range_handle = torch.cuda.nvtx.range_start("range_start")
torch.cuda.nvtx.range_end(range_handle)
def test_bincount_ext(self):
# ensure CUDA code coverage
input_size = (5000,)
w = torch.randn(input_size, dtype=torch.double, device='cuda')
w_cpu = w.cpu()
# test shared memory impl
t = torch.randint(50, input_size, dtype=torch.int8, device='cuda')
self.assertEqual(t.cpu().bincount(), t.bincount())
self.assertEqual(t.cpu().bincount(w_cpu), t.bincount(w))
# test multi block memory impl
# see `THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM` in SummaryOps.cu
t = torch.randint(500, input_size, dtype=torch.int64, device='cuda')
self.assertEqual(t.cpu().bincount(), t.bincount())
self.assertEqual(t.cpu().bincount(w_cpu), t.bincount(w))
# test global memory impl
# see `THRESH_NUMBER_BINS_FOR_GLOBAL_MEM` in SummaryOps.cu
t = torch.randint(2000, input_size, dtype=torch.int64, device='cuda')
self.assertEqual(t.cpu().bincount(), t.bincount())
self.assertEqual(t.cpu().bincount(w_cpu), t.bincount(w))
t = torch.zeros([10], dtype=torch.int32, device='cuda')
        # 35488 * 65536 as int32 would overflow to a negative value,
        # giving a negative bin offset
t[0] = 35488
counted = t.bincount(minlength=65536)
self.assertEqual(torch.sum(counted), 10)
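    # The norm of very small half-precision values should not underflow to zero.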
def test_tiny_half_norm_(self):
a = torch.arange(25).cuda().float()
a /= 100000000
b = a.half()
self.assertGreater(b.norm().item(), 0)
def test_norm_type_conversion(self):
a = torch.ones(65536).cuda().half()
self.assertEqual(a.norm(p=0, dtype=torch.float32), 65536)
# Verifies that mem_get_info works, including when called for a different device
def test_mem_get_info(self):
def _test(idx):
before_free_bytes, before_available_bytes = torch.cuda.mem_get_info(idx)
# increasing to 8MB to force acquiring a new block and overcome blocksize differences across platforms
t = torch.randn(1024 * 1024 * 8, device='cuda:' + str(idx))
after_free_bytes, after_available_bytes = torch.cuda.mem_get_info(idx)
self.assertTrue(after_free_bytes < before_free_bytes)
self.assertEqual(before_available_bytes, after_available_bytes)
_test(0)
if TEST_MULTIGPU:
_test(1)
    # Test that wrap_with_cuda_memory_check successfully detects a leak.
    # Skipped for ROCm; see #62533.
@skipIfRocm
def test_cuda_memory_leak_detection(self):
l = []
@self.wrap_with_cuda_memory_check
def no_leak():
pass
@self.wrap_with_cuda_memory_check
def leak_gpu0():
# increasing to 8MB to force acquiring a new block and overcome blocksize differences across platforms
l.append(torch.randn(1024 * 1024 * 8, device=torch.device("cuda:0")))
no_leak()
with self.assertRaisesRegex(RuntimeError, r"CUDA driver API confirmed .+ on device 0.+"):
leak_gpu0()
if TEST_MULTIGPU:
@self.wrap_with_cuda_memory_check
def leak_gpu1():
# increasing to 8MB to force acquiring a new block and overcome blocksize differences across platforms
l.append(torch.randn(1024 * 1024 * 8, device=torch.device("cuda:1")))
with self.assertRaisesRegex(RuntimeError, r"CUDA driver API confirmed .+ on device 1.+"):
leak_gpu1()
def test_cuda_memory_leak_detection_propagates_errors(self):
with self.assertRaisesRegex(RuntimeError, r"The size of tensor a \(3\) must match"):
with self.assertLeaksNoCudaTensors():
x = torch.randn(3, 1, device='cuda')
y = torch.randn(2, 1, device='cuda')
z = x + y
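    # tril_indices/triu_indices on CUDA should match the nonzero pattern of tril/triu.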
def test_trilu_indices(self):
for test_args in tri_tests_args:
_compare_trilu_indices(self, *test_args, device='cuda')
# test default options
x = torch.ones(
3, 3, dtype=torch.long, device='cuda', layout=torch.strided)
self.assertEqual(
x.tril(0).nonzero().transpose(0, 1),
torch.tril_indices(3, 3, device='cuda'))
self.assertEqual(
x.triu(0).nonzero().transpose(0, 1),
torch.triu_indices(3, 3, device='cuda'))
def test_large_trilu_indices(self):
for test_args in tri_large_tests_args:
_compare_large_trilu_indices(self, *test_args, device='cuda')
@unittest.skipIf(not TEST_MEDIUM_TENSOR, "not enough memory")
def test_cuda_kernel_loop_overflow(self):
# Issue #24309: In extreme cases, the loop variable could overflow and continue
# the kernel loop with a negative index, causing a RuntimeError (invalid write):
x = torch.randn(1, 1, 1, 2**30 + 1, dtype=torch.float16, device="cuda")
expected = x[0, 0, 0, 2**30]
y = torch.nn.functional.avg_pool2d(x, kernel_size=1)
torch.cuda.synchronize()
self.assertEqual(y[0, 0, 0, 2**30], expected)
@unittest.skipIf(not TEST_LARGE_TENSOR, "not enough memory")
def test_cuda_kernel_loop_overflow_large(self):
# Make sure input.numel() > INT_MAX is handled:
x = torch.randn(1, 1, 1, 2**31, dtype=torch.float16, device="cuda")
with self.assertRaisesRegex(RuntimeError, "integer out of range"):
y = torch.nn.functional.avg_pool2d(x, kernel_size=1)
# Issue #24309: In extreme cases, the loop variable could overflow and continue
# the kernel loop with a negative index, causing a RuntimeError (invalid write):
x = torch.randn(1, 1, 1, 2**31 - 1, dtype=torch.float16, device="cuda")
expected = x[0, 0, 0, 2**31 - 2]
y = torch.nn.functional.avg_pool2d(x, kernel_size=1)
torch.cuda.synchronize()
self.assertEqual(y[0, 0, 0, 2**31 - 2], expected)
# this might create a reference cycle on self...
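    # Returns an autograd Function that records its forward stream, asserts that backward
    # runs on that same stream, and sleeps in backward to expose missing stream syncs.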
def _make_multiply_in_stream(self):
class MultiplyInStream(torch.autograd.Function):
@staticmethod
def forward(ctx, x, val):
ctx.val = val
ctx.stream = torch.cuda.current_stream()
return x * val
@staticmethod
def backward(ctx, grad):
self.assertEqual(torch.cuda.current_stream(), ctx.stream)
                # delays the operation in the background stream
torch.cuda._sleep(1000 * 5000)
return grad * ctx.val, None
return MultiplyInStream
@skipCUDANonDefaultStreamIf(True)
def test_streaming_backwards_sync(self):
default_stream = torch.cuda.current_stream()
stream = torch.cuda.Stream()
MultiplyInStream = self._make_multiply_in_stream()
# Tests using grads outside the backward() stream context
# See "Stream semantics of backward passes" on https://pytorch.org/docs/stable/notes/cuda.html
x = torch.randn(5, 5, device='cuda', requires_grad=True)
with torch.cuda.stream(stream):
stream.wait_stream(default_stream)
output = MultiplyInStream.apply(x, 2)
output.sum().backward()
# sync needed
default_stream.wait_stream(stream)
self.assertEqual(x.grad, torch.ones_like(x) * 2)
self.assertEqual(torch.cuda.current_stream(), default_stream)
# Tests that using grads in the same stream context as backward()
# is safe regardless what streams bwd ops ran on
bwd_ambient_stream = torch.cuda.Stream()
x = torch.randn(5, 5, device='cuda', requires_grad=True)
with torch.cuda.stream(stream):
stream.wait_stream(default_stream)
output = MultiplyInStream.apply(x, 3)
with torch.cuda.stream(bwd_ambient_stream):
bwd_ambient_stream.wait_stream(stream)
output.sum().backward()
# x was first used on "stream" so its AccumulateGrad leaf should run on "stream".
# The end of backward() should have synced "bwd_ambient_stream" with "stream"
# so it should be safe to use x.grad here without any syncs.
self.assertEqual(x.grad, torch.ones_like(x) * 3)
self.assertEqual(torch.cuda.current_stream(), bwd_ambient_stream)
# Skip the test for ROCm as per https://github.com/pytorch/pytorch/issues/53190
@skipIfRocm
def test_streaming_backwards_multiple_streams(self):
MultiplyInStream = self._make_multiply_in_stream()
class StreamModel(torch.nn.Module):
def __init__(self):
super(StreamModel, self).__init__()
self.event = torch.cuda.Event()
self.stream0 = torch.cuda.Stream()
self.stream1 = torch.cuda.Stream()
def forward(self, x, x_first_use_on_ambient):
if x_first_use_on_ambient:
x0 = x.clone()
self.stream0.wait_stream(torch.cuda.current_stream())
self.stream1.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(self.stream0):
if not x_first_use_on_ambient:
x0 = x.clone()
y0 = MultiplyInStream.apply(x0, 2)
self.event.record(stream=torch.cuda.current_stream())
with torch.cuda.stream(self.stream1):
y1 = MultiplyInStream.apply(x, 3)
self.stream1.wait_event(self.event)
return y0 + y1
stream = torch.cuda.Stream()
for x_first_use_on_ambient in (True, False):
# the out_of_place=False, iters=1 case stresses if proper syncs are inserted
# when grads are initially None and stolen by backward ops.
for out_of_place, iters in ((True, 1),
(False, 1),
(False, 5)):
with torch.cuda.stream(stream):
x = torch.randn(5, 5, device='cuda', requires_grad=True)
model = StreamModel().cuda()
x.register_hook(lambda grad: self.assertEqual(torch.cuda.current_stream(),
stream if x_first_use_on_ambient else model.stream0))
for p in model.parameters():
self.assertTrue(p.grad is None)
for i in range(iters):
loss = model(x, x_first_use_on_ambient).sum()
if out_of_place:
x_grad = torch.autograd.grad((loss,), (x,))[0]
else:
loss.backward()
# See "Stream semantics of backward passes" on https://pytorch.org/docs/stable/notes/cuda.html
torch.cuda.current_stream().wait_stream(stream)
if out_of_place:
self.assertEqual(x_grad, torch.ones_like(x) * 5 * iters)
else:
self.assertEqual(x.grad, torch.ones_like(x) * 5 * iters)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_streaming_backwards_device_transfer(self):
# This function must run with non-default current streams on all devices, otherwise it's meaningless.
# The intention is to test that to()'s backward (CopyBackward) interacts properly with the
# synchronization logic in torch/csrc/autograd/input_buffer.cpp.
dev0 = torch.device("cuda:0")
dev1 = torch.device("cuda:1")
# Unfortunately I need to make the tensors largeish.
# Bigger tensors = longer D2D transfers = more likely to expose races.
size = 2**26
a = torch.full((size,), 1, device=dev1, dtype=torch.float64, requires_grad=True)
b = torch.full((size,), 1, device=dev1, dtype=torch.float64, requires_grad=True)
# Here to_backward_recipient = a*b is used only once, so MulBackward's InputBuffer slot only expects 1 input.
# This tests the situation where we don't call InputBuffer::accumulate for MulBackward's InputBuffer.
to_backward_recipient = a * b
s = to_backward_recipient.to(device="cuda:0").sum()
torch.cuda.synchronize(device=dev0)
torch.cuda.synchronize(device=dev1)
s.backward()
self.assertTrue(a.grad.sum().item() == size)
self.assertTrue(b.grad.sum().item() == size)
# Here to_backward_recipient = a*b is used twice, so MulBackward's InputBuffer slot expects 2 inputs.
# This tests the situation where we do call InputBuffer::accumulate for MulBackward's InputBuffer.
a.grad = None
b.grad = None
to_backward_recipient = a * b
# Multiply by 2 here so to's backward creates gradient values that are different from the case above,
# to mitigate weirdness if the caching allocator happens to reuse memory regions that were populated
# with 1s by the case above
s0 = to_backward_recipient.to(device="cuda:0").sum() * 2.
s1 = to_backward_recipient.to(device="cuda:0").sum() * 2.
torch.cuda.synchronize(device=dev0)
torch.cuda.synchronize(device=dev1)
s0.backward(retain_graph=True)
s1.backward()
self.assertTrue(a.grad.sum().item() == 4 * size)
self.assertTrue(b.grad.sum().item() == 4 * size)
def test_streaming_backwards_sync_graph_root(self):
# This function tests if bwd ops running on a side stream properly sync with the GraphRoot.
# The potential bug it targets is a race condition. The test uses multiple trials and
# torch.cuda._sleep such that if the race condition exists, the test will almost certainly fail,
# but there's a chance it may spuriously pass. Passing does not guarantee the backend is bug-free,
# but failure does guarantee there is a bug.
fwd_bwd_op_stream = torch.cuda.Stream()
bwd_ambient_stream = torch.cuda.Stream()
# We need these streams to be different otherwise the test is meaningless.
self.assertTrue(fwd_bwd_op_stream != bwd_ambient_stream)
size = int(1e3)
a = torch.full((size,), 2.0, device="cuda", requires_grad=True)
b = torch.full((size,), 3.0, device="cuda", requires_grad=True)
# I don't think we need any manual record_streams below.
# a and b remain in scope for the entire test.
# c and grad remain in scope for each iteration, and there's a full sync between iterations.
for trial in range(5):
torch.cuda.synchronize()
a.grad = b.grad = None
with torch.cuda.stream(fwd_bwd_op_stream):
c = a * b
with torch.cuda.stream(bwd_ambient_stream):
torch.cuda.synchronize()
# Long-running dummy kernel on bwd_ambient_stream delays filling of grad
torch.cuda._sleep(int(50 * get_cycles_per_ms()))
# Fills grad on bwd_ambient_stream
grad = torch.full((size,), float(trial + 1), device="cuda")
                # Bwd ops still run on fwd_bwd_op_stream, so the following will likely fail if
# bwd ops don't sync with bwd_ambient_stream before consuming grad.
torch.autograd.backward(tensors=c, grad_tensors=grad)
# See https://github.com/pytorch/pytorch/issues/47028
# assertEquals below run on bwd_ambient_stream, so this test may also fail
# if backward() fails to sync with bwd_ambient_stream at the end.
# Synchronizing here works around the issue until a proper fix can be made.
torch.cuda.synchronize()
with torch.no_grad():
self.assertEqual(a.grad, grad * b)
self.assertEqual(b.grad, grad * a)
def test_streaming_backwards_callback(self):
# Tests if autograd callbacks sync properly with respect to leaf streams and
# the user-facing stream surrounding backward(). If it fails, first suspect is
# sync logic where "final_callbacks_" are called in torch/csrc/autograd/engine.cpp
MultiplyInStream = self._make_multiply_in_stream()
size = int(1e3)
a = torch.full((size,), 1, device="cuda", dtype=torch.float, requires_grad=True)
b = torch.full((size,), 1, device="cuda", dtype=torch.float, requires_grad=True)
s0 = torch.cuda.Stream()
s1 = torch.cuda.Stream()
s2 = torch.cuda.Stream()
stash = []
# sets up a nontrivial structure of leaf streams
s0.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s0):
c = MultiplyInStream.apply(a, 2)
s1.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s1):
d = MultiplyInStream.apply(b, 3)
s1.wait_stream(s0)
e = c * d
def clone_leaf_grads():
stash.append(a.grad.clone())
stash.append(b.grad.clone())
# Use a hook on e to install the callback
e.register_hook(lambda grad: torch.autograd.Variable._execution_engine.queue_callback(clone_leaf_grads))
s2.wait_stream(s1)
with torch.cuda.stream(s2):
e.sum().backward()
# The autograd engine should sync s2 with all leaf streams then run the callback clone_leaf_grads on s2.
# If those things happened properly, checking the values of the cloned grads on s2 should be safe:
self.assertEqual(stash[0], torch.full_like(a, 6))
self.assertEqual(stash[1], torch.full_like(a, 6))
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
@unittest.skipIf(IS_SANDCASTLE or IS_REMOTE_GPU, "Does not work on Sandcastle")
def test_cuda_init_race(self):
# See https://github.com/pytorch/pytorch/issues/16559
import subprocess
subprocess.check_call([sys.executable, '-c', """\
import torch
import threading
def worker(rank):
torch.tensor([1.]).cuda(rank)
t1 = threading.Thread(target=worker, args=(0,))
t2 = threading.Thread(target=worker, args=(1,))
t1.start()
t2.start()
"""])
@unittest.skipIf(TEST_WITH_ROCM, "ROCm doesn't support device side asserts")
def test_fixed_cuda_assert_async(self):
with self.assertRaisesRegex(RuntimeError, "Boolean value of Tensor with no values is ambiguous"):
torch._assert_async(torch.tensor([], device="cuda"))
with self.assertRaisesRegex(RuntimeError, "Boolean value of Tensor with more than one value is ambiguous"):
torch._assert_async(torch.tensor([0, 0], device="cuda"))
torch._assert_async(torch.tensor(1, device="cuda"))
torch._assert_async(torch.tensor(0.1, device="cuda"))
torch._assert_async(torch.tensor(-0.1, device="cuda"))
torch._assert_async(torch.tensor(True, device="cuda"))
torch._assert_async(torch.tensor(0 + 0.1j, device="cuda"))
fail_stmts = [
"torch._assert_async(torch.tensor(0, device='cuda'))",
"torch._assert_async(torch.tensor(0.0, device='cuda'))",
"torch._assert_async(torch.tensor(False, device='cuda'))",
"torch._assert_async(torch.tensor(0 + 0j, device='cuda'))",
]
import subprocess
for stmt in fail_stmts:
with self.subTest(stmt=stmt):
r = subprocess.call([sys.executable, '-c', f"""\
import torch
{stmt}
torch.cuda.synchronize()
"""])
self.assertTrue(r != 0)
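    # Exercises _amp_foreach_non_finite_check_and_unscale_ on grads with various layouts,
    # dtypes, and devices, checking that infs and nans are detected and grads are unscaled.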
def test_grad_scaling_unscale(self, dtype=torch.float):
inv_scale = torch.full((1,), 0.25, dtype=torch.float, device="cuda:0")
found_inf = torch.full((1,), 0.0, dtype=torch.float, device="cuda:0")
size = 10
g = torch.full((size, size), 4.0, dtype=dtype, device="cuda:0")
ginf = g.clone()
ginf[2, 2] = float('inf')
gnan = g.clone()
gnan[2, 2] = float('nan')
# Tries selected combinations of
# - contiguous grads
# - g.clone().t() which is not contiguous but still non overlapping and dense
# - variants of g.clone()[:, :5] which are not non overlapping and dense
# Non overlapping and dense grads route into a multi tensor apply kernel,
# others use a fallback per-tensor kernel, so we should try both.
cases = (
([g.clone(), g.clone()], False),
([g.clone(), g.clone().t()], False),
([g.clone(), g.clone()[:, :5]], False),
([g.clone()[:, :5], g.clone()[:, :5]], False),
([g.clone(), ginf.clone()], True),
([g.clone(), gnan.clone()], True),
([g.clone(), ginf.clone()[:, :5]], True),
([g.clone(), gnan.clone()[:, :5]], True),
([ginf.clone(), g.clone()[:, :5]], True),
([ginf.clone()[:, :5], g.clone()[:, :5]], True),
)
for grads, has_inf in cases:
found_inf.zero_()
torch._amp_foreach_non_finite_check_and_unscale_(grads, found_inf, inv_scale)
if has_inf:
self.assertEqual(found_inf, 1.0)
else:
self.assertEqual(found_inf, 0.0)
for grad in grads:
self.assertEqual(grad, torch.ones_like(grad), rtol=1e-5, atol=1e-7)
# When passing lists with mismatched dtypes to a raw
# _amp_foreach_non_finite_check_and_unscale_ call,
        # it's expected to fall back to the single-tensor TensorIterator kernel.
grads = [g.clone(), g.to(dtype=torch.float16)]
torch._amp_foreach_non_finite_check_and_unscale_(grads, found_inf, inv_scale)
for grad in grads:
self.assertEqual(grad, torch.ones_like(grad), rtol=1e-5, atol=1e-7)
# Passing lists with mismatched devices to a raw
# _amp_foreach_non_finite_check_and_unscale_ call should raise errors.
if TEST_MULTIGPU:
with self.assertRaisesRegex(RuntimeError, r"Expected all tensors to be on the same device"):
torch._amp_foreach_non_finite_check_and_unscale_([g.clone(), g.to(device="cuda:1")],
found_inf,
inv_scale)
# Creates a list of grads with mismatched dtypes and devices, to ensure
# scaler._unscale_grads_ organizes grads by dtype and device before calling
# _amp_foreach_non_finite_check_and_unscale_ on each set.
# If inject_inf >= 0, writes an inf into one grad for _unscale_grads_ to find.
def perfect_storm_grads(inject_inf):
grads = [g.clone(), g.clone()[:, :5], g.to(dtype=torch.float16), g.to(dtype=torch.float16)]
if TEST_MULTIGPU:
grads += [g.to(device="cuda:1"),
g.to(device="cuda:1")[:, :5],
g.to(device="cuda:1", dtype=torch.float16),
g.to(device="cuda:1", dtype=torch.float16)]
if inject_inf >= 0:
grads[inject_inf][2, 2] = float('inf')
return grads
scaler = torch.cuda.amp.GradScaler()
dummy_params = [torch.empty_like(g) for g in perfect_storm_grads(-1)]
dummy_opt = torch.optim.SGD(dummy_params, lr=1.)
# Ensures the inf/nan checking can find an inf injected onto any grad in the perfect storm.
for inject_inf in range(-1, len(dummy_params)):
found_inf = torch.full((1,), 0.0, dtype=torch.float, device="cuda:0")
grads = perfect_storm_grads(inject_inf)
for i, p in enumerate(dummy_params):
p.grad = grads[i]
found_inf_per_device = scaler._unscale_grads_(dummy_opt, inv_scale, found_inf, True)
if inject_inf < 0:
# No inf was injected, ensures unscaling worked normally.
self.assertTrue(sum(v.item() for v in found_inf_per_device.values()) == 0)
for grad in grads:
self.assertEqual(grad, torch.ones_like(grad), rtol=1e-5, atol=1e-7)
else:
# inf was injected, ensures inf was found.
self.assertTrue(sum(v.item() for v in found_inf_per_device.values()) == 1)
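    # _amp_update_scale_ should grow the scale after `growth_interval` unskipped steps and
    # back it off after a step where an inf was found.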
def test_grad_scaling_update_scale(self, device="cuda", dtype=torch.float):
growth = 2.0
backoff = 0.25
growth_interval = 2
scale = torch.full((1,), 4.0, dtype=dtype, device=device)
growth_tracker = torch.full((1,), 0.0, dtype=torch.int32, device=device)
found_inf = torch.full((1,), 0.0, dtype=torch.float, device="cuda:0")
# Simulates 2 consecutive unskipped iterations
torch._amp_update_scale_(scale, growth_tracker, found_inf, growth, backoff, growth_interval)
self.assertEqual(growth_tracker, 1)
self.assertEqual(scale, 4.0)
torch._amp_update_scale_(scale, growth_tracker, found_inf, growth, backoff, growth_interval)
self.assertEqual(growth_tracker, 0)
self.assertEqual(scale, 8.0)
# Simulates a skipped iteration
found_inf.fill_(1.0)
torch._amp_update_scale_(scale, growth_tracker, found_inf, growth, backoff, growth_interval)
self.assertEqual(growth_tracker, 0)
self.assertEqual(scale, 2.0)
def test_grad_scaling_unscale_sparse(self, device="cuda", dtype=torch.float):
scaler = torch.cuda.amp.GradScaler()
inv_scale = torch.full((1,), 0.25, dtype=dtype, device=device)
found_inf = torch.empty((1,), dtype=dtype, device=device)
cur = found_inf.device
# As of d0c925f (4/16/20), docs are unclear about best API for sparse cuda tensor construction.
# https://pytorch.org/docs/master/tensors.html shows torch.sparse_coo_tensor(...), but it has no docstring.
# The same page shows several tensors with layout=torch.sparse_coo, but no constructors using that layout.
# Meanwhile, https://pytorch.org/docs/master/sparse.html shows torch.sparse.FloatTensor(...), which looks
# legacy and does not accept a device="cuda" kwarg. Going with torch.sparse_coo_tensor.
i = torch.tensor([[0, 1, 1],
[2, 0, 2]], device="cuda", dtype=torch.int64)
v = torch.tensor([16., 32., 64.], device="cuda", dtype=torch.float)
s = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device="cuda", dtype=dtype)
p = s.clone()
assert p.is_sparse
opt = torch.optim.SGD([p], lr=1.)
p.grad = s.clone()
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, False)[cur]
self.assertEqual(found_inf, 0.0)
self.assertEqual(p.grad.to_dense(), (s / 4).to_dense())
v = torch.FloatTensor([16., 32., float('inf')])
p.grad = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device="cuda", dtype=dtype)
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, False)[cur]
self.assertEqual(found_inf, 1.0)
v = torch.FloatTensor([16., 32., float('nan')])
p.grad = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device="cuda", dtype=dtype)
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, False)[cur]
self.assertEqual(found_inf, 1.0)
p = s.clone().half()
assert p.is_sparse
opt = torch.optim.SGD([p], lr=1.)
p.grad = s.clone().half()
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, True)[cur]
self.assertEqual(found_inf, 0.0)
self.assertEqual(p.grad.to_dense(), (s.half() / 4).to_dense())
# Creates fp16 sparse tensor with duplicated indices (uncoalesced). The uncoalesced representation
# does not overflow in fp16, but the coalesced representation would, because 64000 + 64000 > fp16 max.
# _amp_non_finite_check_and_unscale_ should report an overflow here.
i = torch.LongTensor([[0, 1, 0],
[2, 0, 2]])
v = torch.FloatTensor([64000., 32., 64000.])
p.grad = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device="cuda", dtype=torch.float16)
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, True)[cur]
self.assertEqual(found_inf, 1.0)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_grad_scaling_device_as_key(self):
# Ensure that different instances of "device" objects that point to the same device
# are treated as identical keys by dicts. GradScaler relies on this behavior, and may
# error otherwise in a way that's difficult to detect (a silent performance hit).
d = {}
t = torch.empty((1,), device="cuda:0")
dev0a = torch.device("cuda:0")
dev0b = torch.device("cuda:0")
dev1a = torch.device("cuda:1")
dev1b = torch.device("cuda:1")
self.assertTrue(hash(dev0a) == hash(dev0b))
self.assertTrue(hash(dev1a) == hash(dev1b))
d[dev0a] = "0a"
d[dev0b] = "0b"
self.assertTrue(len(d) == 1)
self.assertTrue(d[dev0a] == "0b")
d[t.device] = "t"
self.assertTrue(len(d) == 1)
self.assertTrue(d[dev0a] == "t")
d[dev1a] = "1a"
d[dev1b] = "1b"
self.assertTrue(len(d) == 2)
self.assertTrue(d[dev1a] == "1b")
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_grad_scaling_scale(self):
scaler = torch.cuda.amp.GradScaler(init_scale=2.)
t0 = torch.full((1,), 4.0, dtype=torch.float32, device="cuda:0")
t1 = torch.full((1,), 4.0, dtype=torch.float32, device="cuda:1")
# Create some nested iterables of tensors on different devices.
outputs = (t1.clone(), (t0.clone(), t1.clone()), [t0.clone(), (t1.clone(), t0.clone())])
outputs = scaler.scale(outputs)
self.assertTrue(outputs[0] == 8.0 and outputs[1][0] == 8.0 and outputs[1][1] == 8.0 and
outputs[2][0] == 8.0 and outputs[2][1][0] == 8.0 and outputs[2][1][1] == 8.0)
self.assertTrue(scaler._scale.device == t1.device)
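    # load_state_dict should copy the scale and growth/backoff settings and reset the growth
    # tracker, whether or not the scale tensor was lazily initialized.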
def test_grad_scaling_state_dict(self):
for lazy_init_scale in True, False:
s0 = torch.cuda.amp.GradScaler(init_scale=3., growth_factor=4., backoff_factor=.5, growth_interval=2)
s1 = torch.cuda.amp.GradScaler(init_scale=6., growth_factor=7., backoff_factor=.8, growth_interval=1)
# sets a random value for load_state_dict to overwrite
s1._init_growth_tracker = 7
if lazy_init_scale:
# Dummy scale() call to ensure the scale tensor is lazily initialized.
s1.scale(torch.full((1,), 4.0, dtype=torch.float32, device="cuda:0"))
self.assertTrue(isinstance(s1._scale, torch.cuda.FloatTensor))
s1.load_state_dict(s0.state_dict())
self.assertEqual(s1.get_scale(), 3.)
self.assertEqual(s1.get_growth_factor(), 4.)
self.assertEqual(s1.get_backoff_factor(), .5)
self.assertEqual(s1.get_growth_interval(), 2)
self.assertEqual(s1._init_growth_tracker, 0)
def _create_scaling_models_optimizers(self, device="cuda"):
# Create a module+optimizer that will use scaling, and a control module+optimizer
# that will not use scaling, against which the scaling-enabled module+optimizer can be compared.
mod_control = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Linear(8, 8)).to(device=device)
mod_scaling = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Linear(8, 8)).to(device=device)
for c, s in zip(mod_control.parameters(), mod_scaling.parameters()):
s.data.copy_(c.data)
opt_control = torch.optim.SGD(mod_control.parameters(), lr=1.0)
opt_scaling = torch.optim.SGD(mod_scaling.parameters(), lr=1.0)
return mod_control, mod_scaling, opt_control, opt_scaling
def _create_scaling_case(self, device="cuda", dtype=torch.float):
data = [(torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device)),
(torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device)),
(torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device)),
(torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device))]
loss_fn = torch.nn.MSELoss().cuda()
skip_iter = 2
return self._create_scaling_models_optimizers(device=device) + (data, loss_fn, skip_iter)
# _run_scaling_case generalizes some single-optimizer test logic to avoid too much copy-pasting below.
def _run_scaling_case(self, run, unskipped, skipped, atol=1e-7):
# Ensure scaling can be disabled without changing user control flow.
for enabled in True, False:
mod_control, mod_scaling, opt_control, opt_scaling, data, loss_fn, skip_iter = self._create_scaling_case()
# For functionality, test with a modest initial scale, and an unrealistically-large growth factor
# so any potential errors with the growth factor handling will be magnified.
scaler = torch.cuda.amp.GradScaler(init_scale=128., growth_factor=2.0, enabled=enabled, growth_interval=1)
_ = run(data, mod_control, opt_control, scaler, loss_fn, skip_iter, False)
ret = run(data, mod_scaling, opt_scaling, scaler, loss_fn, skip_iter, True)
# Allows run() to optionally return a different scaler instance.
scaler = ret if ret else scaler
# If scaling was enabled, the scale factor should have been multiplied by the growth factor
# len(data) - skipped times and the backoff factor "skipped" times.
if enabled:
net_growth = scaler.get_growth_factor()**unskipped if unskipped > 0 else 1.0
net_backoff = scaler.get_backoff_factor()**skipped if skipped > 0 else 1.0
self.assertTrue(scaler.get_scale() == (128. * net_growth * net_backoff))
else:
self.assertTrue(scaler.get_scale() == 1.0)
for c, s in zip(mod_control.parameters(), mod_scaling.parameters()):
self.assertEqual(c, s, atol=atol, rtol=1e-05)
# Compares no scaling + no autocasting against scaling + autocasting.
def test_grad_scaling_autocast(self):
try_pickle = False
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
for i, (input, target) in enumerate(data):
optimizer.zero_grad()
with torch.autocast('cuda', enabled=try_scaling_api):
output = model(input)
loss = loss_fn(output, target)
if try_scaling_api:
scaler.scale(loss).backward()
if i == skip_iter and scaler.is_enabled():
model[1].weight.grad.data.fill_(float('inf'))
scaler.step(optimizer)
scaler.update()
if try_pickle:
scaler = pickle.loads(pickle.dumps(scaler))
else:
loss.backward()
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer.step()
return scaler
# sets atol=1e-3 because we're comparing pure fp32 arithmetic vs a mixture of fp16 and fp32
self._run_scaling_case(run, unskipped=3, skipped=1, atol=1e-3)
# this will be picked up by try_pickle within run():
try_pickle = True
self._run_scaling_case(run, unskipped=3, skipped=1, atol=1e-3)
def test_grad_scaling_clipping(self):
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
max_norm = 0.2 # A reasonable value that actually has an effect, based on printouts of grads
for i, (input, target) in enumerate(data):
optimizer.zero_grad()
output = model(input)
loss = loss_fn(output, target)
if try_scaling_api:
scaler.scale(loss).backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm * scaler.get_scale())
if i == skip_iter and scaler.is_enabled():
model[1].weight.grad.data.fill_(float('inf'))
scaler.step(optimizer)
scaler.update()
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer.step()
self._run_scaling_case(run, unskipped=3, skipped=1, atol=1e-5)
def test_grad_scaling_clipping_separate_unscale(self):
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
max_norm = 0.2 # A reasonable value that actually has an effect, based on printouts of grads
for i, (input, target) in enumerate(data):
optimizer.zero_grad()
output = model(input)
loss = loss_fn(output, target)
if try_scaling_api:
scaler.scale(loss).backward()
if i == skip_iter and scaler.is_enabled():
model[1].weight.grad.data.fill_(float('inf'))
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm, error_if_nonfinite=False)
scaler.step(optimizer)
scaler.update()
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer.step()
self._run_scaling_case(run, unskipped=3, skipped=1)
@unittest.skipIf(IS_WINDOWS, 'FIXME: fix this test for Windows')
def test_grad_scaling_penalty(self):
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
for i, (input, target) in enumerate(data):
optimizer.zero_grad()
output = model(input)
loss = loss_fn(output, target)
if try_scaling_api:
grad_params = torch.autograd.grad(scaler.scale(loss),
model.parameters(), create_graph=True)
inv_scale = 1. / scaler.get_scale()
grad_params = [p * inv_scale for p in grad_params]
else:
grad_params = torch.autograd.grad(loss, model.parameters(), create_graph=True)
grad_norm = 0
for grad in grad_params:
grad_norm += grad.pow(2).sum()
grad_norm = grad_norm.sqrt()
loss = loss + grad_norm
if try_scaling_api:
scaler.scale(loss).backward()
if i == skip_iter and scaler.is_enabled():
model[1].weight.grad.data.fill_(float('inf'))
scaler.step(optimizer)
scaler.update()
else:
loss.backward()
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer.step()
self._run_scaling_case(run, unskipped=3, skipped=1)
def test_grad_scaling_accumulation(self):
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
iters_to_accumulate = 2
for i, (input, target) in enumerate(data):
output = model(input)
loss = loss_fn(output, target)
loss = loss / iters_to_accumulate
if try_scaling_api:
scaler.scale(loss).backward()
else:
loss.backward()
if (i + 1) % iters_to_accumulate == 0:
if try_scaling_api:
scaler.step(optimizer)
scaler.update()
optimizer.zero_grad()
else:
optimizer.step()
optimizer.zero_grad()
self._run_scaling_case(run, unskipped=2, skipped=0)
def test_grad_scaling_multiple(self):
# Tests gradient scaling with 2 models and 2 optimizers that both receive gradients from 2 losses.
# Some of the logic here cannot reuse the generic helper functions created for the 1-optimizer cases.
for enabled in True, False:
mod_control0, mod_scaling0, opt_control0, opt_scaling0, data, loss_fn, skip_iter = \
self._create_scaling_case()
mod_control1, mod_scaling1, opt_control1, opt_scaling1 = \
self._create_scaling_models_optimizers()
scaler = torch.cuda.amp.GradScaler(init_scale=128., growth_factor=2.0, enabled=enabled, growth_interval=1)
def run(model0, model1, optimizer0, optimizer1, try_scaling_api):
for i, (input, target) in enumerate(data):
optimizer0.zero_grad()
optimizer1.zero_grad()
output0 = model0(input)
output1 = model1(input)
loss0 = loss_fn(0.3 * output0 + 0.7 * output1, target)
loss1 = loss_fn(0.6 * output0 - 0.4 * output1, target)
if try_scaling_api:
scaler.scale(loss0).backward(retain_graph=True)
scaler.scale(loss1).backward()
if i == skip_iter and scaler.is_enabled():
model1[1].weight.grad.data.fill_(float('inf'))
# As an additional stress test, separately unscale for one of the optimizers.
scaler.unscale_(optimizer0)
scaler.step(optimizer0)
scaler.step(optimizer1)
scaler.update()
else:
loss0.backward(retain_graph=True)
loss1.backward()
optimizer0.step()
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer1.step()
run(mod_control0, mod_control1, opt_control0, opt_control1, False)
run(mod_scaling0, mod_scaling1, opt_scaling0, opt_scaling1, True)
# The loss scale should have been multiplied by the growth factor 3 times and the backoff factor once.
            self.assertTrue(scaler.get_scale() == (128. * scaler.get_growth_factor()**3 *
                                                   scaler.get_backoff_factor()**1 if enabled else 1.0))
for c, s in zip(chain(mod_control0.parameters(), mod_control1.parameters()),
chain(mod_scaling0.parameters(), mod_scaling1.parameters())):
self.assertEqual(c, s, rtol=1e-5, atol=1e-7)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_grad_scaling_multigpu(self):
# Same as above, but runs some of the models on device 1.
# GradScaler should transparently handle losses and gradients on multiple devices.
# This test could be combined with the test above, but I think it makes sense to treat
# multi-GPU operations separately.
dev0 = torch.device("cuda:0")
dev1 = torch.device("cuda:1")
for enabled in True, False:
mod_control0, mod_scaling0, opt_control0, opt_scaling0, data, loss_fn, skip_iter = \
self._create_scaling_case()
mod_control1, mod_scaling1, opt_control1, opt_scaling1 = \
self._create_scaling_models_optimizers(device=dev1)
scaler = torch.cuda.amp.GradScaler(init_scale=128., growth_factor=2.0, enabled=enabled, growth_interval=1)
def run(model0, model1, optimizer0, optimizer1, try_scaling_api):
for i, (input, target) in enumerate(data):
optimizer0.zero_grad()
optimizer1.zero_grad()
output0 = model0(input)
output1 = model1(input.to(dev1))
loss0 = loss_fn(0.3 * output0 + 0.7 * output1.to(dev0), target)
loss1 = loss_fn(0.6 * output0.to(dev1) - 0.4 * output1, target.to(dev1))
if try_scaling_api:
scaler.scale(loss0).backward(retain_graph=True)
scaler.scale(loss1).backward()
if i == skip_iter and scaler.is_enabled():
model1[1].weight.grad.data.fill_(float('inf'))
# As an additional stress test, separately unscale for one of the optimizers.
scaler.unscale_(optimizer0)
scaler.step(optimizer0)
scaler.step(optimizer1)
# Make sure the found_infs were collected properly across optimizers and devices.
if scaler.is_enabled():
self.assertTrue(len(scaler._found_inf_per_device(optimizer0)) == 1)
self.assertTrue(len(scaler._found_inf_per_device(optimizer1)) == 1)
self.assertTrue(scaler._found_inf_per_device(optimizer0)[dev0].item() == 0.)
self.assertTrue(scaler._found_inf_per_device(optimizer1)[dev1].item() ==
float(i == skip_iter))
scaler.update()
else:
loss0.backward(retain_graph=True)
loss1.backward()
optimizer0.step()
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer1.step()
run(mod_control0, mod_control1, opt_control0, opt_control1, False)
run(mod_scaling0, mod_scaling1, opt_scaling0, opt_scaling1, True)
# The loss scale should have been multiplied by the growth factor 3 times and the backoff factor once.
self.assertTrue(scaler.get_scale() == ((128. * scaler.get_growth_factor()**3 *
scaler.get_backoff_factor()**1) if enabled else 1.0))
# Copy mod_control1 and mod_scaling1 back to device 0 for comparison
mod_control1.to(dev0)
mod_scaling1.to(dev0)
for c, s in zip(chain(mod_control0.parameters(), mod_control1.parameters()),
chain(mod_scaling0.parameters(), mod_scaling1.parameters())):
self.assertEqual(c, s, rtol=1e-5, atol=1e-7)
def test_cublas_multiple_threads_same_device(self):
# Note: these parameters should be carefully tuned.
# Too small a value makes it hard for the race condition to occur,
# while too large a value sometimes causes a hang.
size = 1024
num_threads = 2
trials = 3
test_iters = 100
weight = torch.ones((size, size), device='cuda')
results = {}
barrier = threading.Barrier(num_threads)
def _worker(t):
my_stream = torch.cuda.Stream()
# Hard sync so we don't need to worry about creating and using tensors
# across streams or the fact that default streams are thread-local.
# Those issues are not the target of this test.
torch.cuda.synchronize()
# Line up threads to increase likelihood of race conditions.
barrier.wait()
with torch.cuda.stream(my_stream):
for i in range(test_iters):
# If all threads are sharing the same cublas handle,
# the following sequence may occur:
# thread 0 calls cublasSetStream()
# thread 1 calls cublasSetStream()
# thread 0 launches its raw gemm, which it thinks is in
# its own stream, but is actually in thread 1's stream.
# thread 0 enqueues its div_, which IS in its own stream,
# but actually now races with its gemm.
results[t] = torch.mm(results[t], weight)
results[t].div_(float(size))
torch.cuda.synchronize()
for _ in range(trials):
for t in range(num_threads):
results[t] = torch.ones((size, size), device='cuda')
threads = [threading.Thread(target=_worker,
args=(t,)) for t in range(num_threads)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
for t in range(num_threads):
self.assertEqual(results[t].sum().item(), size * size)
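# The cublas test above presumably exercises per-thread handle management
# (analogous to the per-thread cudnn handles tested below); if a single handle
# were shared across threads, the interleaving described in the worker comments
# could corrupt results[t] and the size * size check would fail.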
# Test is flaky on Windows (https://github.com/pytorch/pytorch/issues/57401)
@unittest.skipIf(IS_WINDOWS, 'Test is flaky on Windows (see issue 57401)')
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
@skipIfRocm
def test_cudnn_multiple_threads_same_device(self):
# This function is intended to test the lazy creation and reuse of per-thread
# cudnn handles on each device in aten/src/ATen/cudnn/Handles.cpp.
# Failure here likely indicates something wrong with that logic.
weight = torch.ones((1, 1, 2, 2), device='cuda')
results = {}
num_threads = 2
trials = 3
test_iters = 1000
barrier = threading.Barrier(num_threads)
with torch.backends.cudnn.flags(enabled=True):
def _worker(t):
my_stream = torch.cuda.Stream()
# Hard sync so we don't need to worry about creating and using tensors
# across streams or the fact that default streams are thread-local.
# Those issues are not the target of this test.
torch.cuda.synchronize()
# Line up threads to increase likelihood of race conditions.
barrier.wait()
with torch.cuda.stream(my_stream):
for _ in range(test_iters):
# If all threads are sharing the same cudnn handle,
# the following sequence may occur:
# thread 0 calls setCuDNNStreamToCurrent()
# thread 1 calls setCuDNNStreamToCurrent()
# thread 0 launches its raw convolution, which it thinks is in
# its own stream, but is actually in thread 1's stream.
# thread 0 enqueues its div_, which IS in its own stream,
# but now races with its convolution.
results[t] = torch.nn.functional.conv2d(results[t], weight, padding=0)
results[t].div_(4.0)
torch.cuda.synchronize()
for _ in range(trials):
for t in range(num_threads):
results[t] = torch.ones((1, 1, 2048, 2048), device='cuda')
threads = [threading.Thread(target=_worker,
args=(t,)) for t in range(num_threads)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
for t in range(num_threads):
self.assertEqual(results[t].sum().item(),
(2048 - test_iters) * (2048 - test_iters))
def test_cusparse_multiple_threads_same_device(self):
size = 1024
num_threads = 2
trials = 3
test_iters = 500
def ones_sparse(size):
a = torch.arange(size, device='cuda')
indices = torch.cartesian_prod(a, a).t()
values = torch.ones(size * size, device='cuda')
return torch.sparse_coo_tensor(indices, values)
weight = ones_sparse(size)
results = {}
barrier = threading.Barrier(num_threads)
def _worker(t):
my_stream = torch.cuda.Stream()
# Hard sync so we don't need to worry about creating and using tensors
# across streams or the fact that default streams are thread-local.
# Those issues are not the target of this test.
torch.cuda.synchronize()
# Line up threads to increase likelihood of race conditions.
barrier.wait()
with torch.cuda.stream(my_stream):
for i in range(test_iters):
# If all threads are sharing the same cublas handle,
# the following sequence may occur:
# thread 0 calls cublasSetStream()
# thread 1 calls cublasSetStream()
# thread 0 launches its raw gemm, which it thinks is in
# its own stream, but is actually in thread 1's stream.
# thread 0 enqueues its div_, which IS in its own stream,
# but actually now races with its gemm.
results[t] = weight.mm(results[t])
results[t].div_(float(size))
torch.cuda.synchronize()
for _ in range(trials):
for t in range(num_threads):
results[t] = torch.ones((size, size), device='cuda')
threads = [threading.Thread(target=_worker,
args=(t,)) for t in range(num_threads)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
for t in range(num_threads):
self.assertEqual(results[t].sum().item(), size * size)
def _run_autocast_outofplace(self, op, args, run_as_type, out_type=None, module=torch, add_kwargs=None):
# helper to cast args
def cast(val, to_type):
if isinstance(val, torch.Tensor):
return val.to(to_type) if val.is_floating_point() else val
elif isinstance(val, collections.abc.Iterable):
return type(val)(cast(v, to_type) for v in val)
else:
return val
if add_kwargs is None:
add_kwargs = {}
fast_dtype = torch.bfloat16 if run_as_type == torch.bfloat16 else torch.float16
self.assertFalse(torch.is_autocast_enabled())
with torch.autocast('cuda', dtype=fast_dtype):
self.assertTrue(torch.is_autocast_enabled())
out_type = out_type if out_type is not None else run_as_type
output = output_method = None
# Try module.* variant, if requested:
if module is not None and hasattr(module, op):
output = getattr(module, op)(*args, **add_kwargs)
if isinstance(output, torch.Tensor):
self.assertTrue(out_type == output.dtype,
"autocast for torch.{} produced {}, should produce {}"
.format(op, output.dtype, out_type))
# Try Tensor.* variant:
if hasattr(torch.Tensor, op):
output_method = getattr(args[0], op)(*args[1:], **add_kwargs)
if isinstance(output_method, torch.Tensor):
self.assertTrue(out_type == output_method.dtype,
"autocast for Tensor.{} produced {}, should produce {}"
.format(op, output_method.dtype, out_type))
self.assertTrue((output is not None) or (output_method is not None),
"{} not found as an attribute on either Tensor or the requested module {}".format(
op, module))
# Accounts for ops that return Tensors, iterables, and other non-Tensors.
# For example, lstm_cell returns a tuple and equal returns bool.
def compare(first, second):
if isinstance(first, torch.Tensor):
return torch.equal(first, second)
elif isinstance(first, collections.abc.Iterable):
return all(compare(f, s) for f, s in zip(first, second))
else:
return first == second
# If both torch.* and Tensor.* variants were found, check outputs are identical
if (output is not None) and (output_method is not None):
self.assertTrue(type(output) == type(output_method))
comparison = compare(output, output_method)
self.assertTrue(comparison, "torch.{0} result did not match Tensor.{0} result".format(op))
# Compare numerics to Python-side "autocasting" that (we expect) does the same thing
# as the C++-side autocasting, and should be bitwise accurate.
output_to_compare = output if output is not None else output_method
with torch.autocast('cuda', enabled=False):
self.assertFalse(torch.is_autocast_enabled())
if module is not None and hasattr(module, op):
control = getattr(module, op)(*cast(args, run_as_type), **add_kwargs)
else:
control = getattr(args[0].to(run_as_type), op)(*cast(args[1:], run_as_type), **add_kwargs)
self.assertTrue(type(output_to_compare) == type(control))
comparison = compare(output_to_compare, control)
self.assertTrue(comparison, "torch.{} result did not match control".format(op))
self.assertTrue(torch.is_autocast_enabled())
self.assertFalse(torch.is_autocast_enabled())
def args_maybe_kwargs(self, op_with_args):
if len(op_with_args) == 2:
return op_with_args[0], op_with_args[1], {}
else:
return op_with_args[0], op_with_args[1], op_with_args[2]
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_fp16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op_with_args in self.autocast_lists.torch_fp16:
skip_test = False
op, args = op_with_args[0], op_with_args[1]
if len(op_with_args) == 3:
skip_test = op_with_args[2] # TEST_WITH_ROCM
if not skip_test:
self._run_autocast_outofplace(op, args, torch.float16)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_bf16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op_with_args in self.autocast_lists.torch_fp16:
skip_test = False
op, args = op_with_args[0], op_with_args[1]
if len(op_with_args) == 3:
skip_test = op_with_args[2] # TEST_WITH_ROCM
should_error_from_not_implemented = 'cudnn' in op or 'prelu' in op or 'thnn' in op \
or 'fused' in op or 'gru' in op or op == '_thnn_fused_lstm_cell' or op == 'lstm_cell'
if not skip_test:
if should_error_from_not_implemented:
with self.assertRaises(RuntimeError, msg=str(op) + ' should not be supported for bfloat16!'):
self._run_autocast_outofplace(op, args, torch.bfloat16)
else:
if torch.cuda.is_bf16_supported():
self._run_autocast_outofplace(op, args, torch.bfloat16)
else:
with self.assertRaisesRegex(RuntimeError, 'Device does not support bfloat16'):
self._run_autocast_outofplace(op, args, torch.bfloat16)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_fp32(self):
for op_with_args in self.autocast_lists.torch_fp32:
op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
self._run_autocast_outofplace(op, args, torch.float32, add_kwargs=maybe_kwargs)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_need_autocast_promote(self):
for op, args in self.autocast_lists.torch_need_autocast_promote:
self._run_autocast_outofplace(op, args, torch.float32)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_expect_builtin_promote(self):
for op, args, out_type in self.autocast_lists.torch_expect_builtin_promote:
self._run_autocast_outofplace(op, args, torch.float32, out_type=out_type)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_nn_fp16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op, args in self.autocast_lists.nn_fp16:
self._run_autocast_outofplace(op, args, torch.float16, module=torch._C._nn)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_nn_bf16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op, args in self.autocast_lists.nn_fp16:
if torch.cuda.is_bf16_supported():
self._run_autocast_outofplace(op, args, torch.bfloat16, module=torch._C._nn)
else:
with self.assertRaisesRegex(RuntimeError, 'Device does not support bfloat16'):
self._run_autocast_outofplace(op, args, torch.bfloat16, module=torch._C._nn)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_nn_fp32(self):
for op, args in self.autocast_lists.nn_fp32:
self._run_autocast_outofplace(op, args, torch.float32, module=torch._C._nn)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_linalg_fp16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op, args in self.autocast_lists.linalg_fp16:
self._run_autocast_outofplace(op, args, torch.float16, module=torch._C._linalg)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_methods_fp16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op, args in self.autocast_lists.methods_fp16:
self._run_autocast_outofplace(op, args, torch.float16, module=None)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_methods_fp32(self):
for op, args in self.autocast_lists.methods_fp32:
self._run_autocast_outofplace(op, args, torch.float32, module=None)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_methods_expect_builtin_promote(self):
for op, args, out_type in self.autocast_lists.methods_expect_builtin_promote:
self._run_autocast_outofplace(op, args, torch.float32, module=None, out_type=out_type)
def test_autocast_banned(self):
with torch.autocast('cuda'):
for op, args, module in self.autocast_lists.banned:
with self.assertRaises(RuntimeError):
getattr(module, op)(*args)
def test_autocast_ignored_types(self):
with torch.autocast('cuda'):
for ignore_type in (torch.double, torch.int32):
a_ignore = torch.ones((8, 8), dtype=ignore_type, device="cuda:0")
b_ignore = torch.ones((8, 8), dtype=ignore_type, device="cuda:0")
c_16 = torch.ones((8, 8), dtype=torch.float16, device="cuda:0")
# Tests if CastPolicy::fp16 ops ignore double and int
# Currently, no ops belonging to this policy support integer inputs.
if ignore_type is torch.double:
with self.assertRaises(RuntimeError):
torch.mm(a_ignore, c_16)
with torch.autocast('cuda', enabled=False):
type_no_autocast = torch.mm(a_ignore, b_ignore).dtype
self.assertTrue(torch.mm(a_ignore, b_ignore).dtype is type_no_autocast)
# Tests if CastPolicy::fp32 ops ignore double and int
with torch.autocast('cuda', enabled=False):
type_no_autocast = torch.pow(a_ignore, 2.0).dtype
self.assertTrue(torch.pow(a_ignore, 2.0).dtype is type_no_autocast)
# Tests if CastPolicy::fp32_set_opt_dtype ops ignore double and int
with torch.autocast('cuda', enabled=False):
type_no_autocast = torch.sum(a_ignore).dtype
self.assertTrue(torch.sum(a_ignore).dtype is type_no_autocast)
# Tests if CastPolicy::fp32_append_dtype ops ignore double and int
# Currently, no ops belonging to this policy support integer inputs.
if ignore_type is torch.double:
with torch.autocast('cuda', enabled=False):
type_no_autocast = torch.norm(a_ignore).dtype
self.assertTrue(torch.norm(a_ignore).dtype is type_no_autocast)
def test_autocast_custom_enabled(self):
class MyMM(torch.autograd.Function):
@staticmethod
@torch.cuda.amp.custom_fwd
def forward(ctx, a, b):
self.assertTrue(a.dtype is torch.float32)
self.assertTrue(b.dtype is torch.float32)
self.assertTrue(torch.is_autocast_enabled())
ctx.save_for_backward(a, b)
return a.mm(b)
@staticmethod
@torch.cuda.amp.custom_bwd
def backward(ctx, grad):
self.assertTrue(torch.is_autocast_enabled())
a, b = ctx.saved_tensors
return grad.mm(b.t()), a.t().mm(grad)
mymm = MyMM.apply
x = torch.randn((8, 8), device="cuda", dtype=torch.float32, requires_grad=True)
y = torch.randn((8, 8), device="cuda", dtype=torch.float32, requires_grad=True)
with torch.cuda.amp.autocast():
output = mymm(x, y)
self.assertTrue(output.dtype is torch.float16)
loss = output.sum()
loss.backward()
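# Pattern demonstrated above: user-defined autograd Functions that are
# autocast-agnostic can decorate forward with @torch.cuda.amp.custom_fwd and
# backward with @torch.cuda.amp.custom_bwd; inside an autocast region the
# Function then participates in autocasting like a built-in op (mm -> fp16).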
def test_autocast_custom_cast_inputs(self):
class MyMM(torch.autograd.Function):
@staticmethod
@torch.cuda.amp.custom_fwd(cast_inputs=torch.float32)
def forward(ctx, a, container, expect_type):
b = container[1][0]
self.assertTrue(a.dtype is expect_type)
self.assertTrue(b.dtype is expect_type)
self.assertFalse(torch.is_autocast_enabled())
ctx.save_for_backward(a, b)
return a.mm(b)
@staticmethod
@torch.cuda.amp.custom_bwd
def backward(ctx, grad):
self.assertFalse(torch.is_autocast_enabled())
a, b = ctx.saved_tensors
return grad.mm(b.t()), None, None
mymm = MyMM.apply
x = torch.randn((8, 8), device="cuda", dtype=torch.float16, requires_grad=True)
# Puts one input tensor in a nested container. y's contained Tensor won't receive a gradient,
# because torch.autograd.Function can't hand gradients back to non-Tensor forward arguments.
# Sets requires_grad=False explicitly so we don't lie about expecting a gradient.
y = (0, {0: torch.randn((8, 8), device="cuda", dtype=torch.float16, requires_grad=False)})
with torch.autocast('cuda', ):
output = mymm(x, y, torch.float32)
self.assertTrue(output.dtype is torch.float32)
loss = output.sum()
loss.backward()
# Tests if custom_fwd becomes a no-op when mymm runs outside an autocast-enabled region.
output = mymm(x, y, torch.float16)
self.assertTrue(output.dtype is torch.float16)
loss = output.sum()
loss.backward()
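# With custom_fwd(cast_inputs=torch.float32), Tensor arguments (including those
# nested in containers, as the test above shows) are cast to fp32 and autocast
# is disabled inside forward/backward, so the fp16 inputs produce an fp32
# output when called under autocast; outside autocast, custom_fwd is a no-op.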
def test_autocast_cat_jit(self):
# Reported at https://github.com/pytorch/pytorch/issues/38958
class Model(torch.nn.Module):
def forward(self):
a = torch.randn(1)
b = torch.randn(1)
c = torch.cat((a, b), 0)
d = torch.stack([c, c], 0)
return d
# The JIT here doesn't really matter, we just need to call
# cat via the boxed API
model = Model()
model_jit_script = torch.jit.script(model)
with torch.autocast('cuda', enabled=True):
model()
model_jit_script()
# cudnn RNNs require special backend handling (weights are cast to FP16 and reflattened)
# so they get a dedicated test.
# Despite the large number of RNN cases it tries, the test takes < 15 seconds on a Titan V (similar to V100).
@skipIfRocm
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_rnn(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
# seq, batch, features, hidden size
clses = ("RNN", "GRU", "LSTM")
T, B, F, H = 3, 4, 5, 6
dtypes = (torch.float16, torch.float32)
input_layouts = ("seq_first", "batch_first", "packed")
for (cls, num_layers, bias, input_layout, bidirectional, try_nonpreflattened_weights,
input_dtype, hidden_dtype, weight_dtype) in \
product(clses, (1, 2), (True, False), input_layouts, (True, False), (True, False),
dtypes, dtypes, dtypes):
if input_layout == "seq_first":
batch_first = False
x = torch.randn((T, B, F), device="cuda", dtype=input_dtype)
elif input_layout == "batch_first":
batch_first = True
x = torch.randn((B, T, F), device="cuda", dtype=input_dtype)
elif input_layout == "packed":
batch_first = False
x = torch.nn.utils.rnn.pack_padded_sequence(torch.randn((T, B, F),
device="cuda", dtype=input_dtype),
lengths=(3, 2, 1, 3),
enforce_sorted=False)
rnn = getattr(torch.nn, cls)(F, H, num_layers=num_layers, bidirectional=bidirectional,
bias=bias, batch_first=batch_first).cuda().to(dtype=weight_dtype)
if try_nonpreflattened_weights:
for p in rnn.parameters():
with torch.no_grad():
p.set_(p.clone())
h = torch.randn((num_layers * (2 if bidirectional else 1), B, H),
device="cuda", dtype=hidden_dtype)
if cls == "LSTM":
c = torch.randn((num_layers * (2 if bidirectional else 1), B, H),
device="cuda", dtype=hidden_dtype)
h = (h, c)
with torch.autocast('cuda', ):
out, h_out = rnn(x, h)
out = out.data if input_layout == "packed" else out
self.assertEqual(out.dtype, torch.float16)
# Autocast wrapper requires at::_cudnn_rnn is autograd-exposed. This check can't guarantee
# at::_cudnn_rnn is autograd-exposed, but if it fires, it indicates some funny business has
# occurred and we should double check that at::_cudnn_rnn remains autograd-exposed.
self.assertEqual(out.grad_fn.name(), "CudnnRnnBackward0")
out.sum().backward()
grads = [p.grad.clone() for p in rnn.parameters()]
rnn.zero_grad()
if cls == "LSTM":
out_control, h_out_control = rnn.to(dtype=torch.float16)(x.half(), (h[0].half(), h[1].half()))
else:
out_control, h_out_control = rnn.to(dtype=torch.float16)(x.half(), h.half())
out_control = out_control.data if input_layout == "packed" else out_control
out_control.sum().backward()
grads_control = [p.grad.clone() for p in rnn.parameters()]
# Compares with default tolerances, even for FP16 execution. Barring nondeterminism,
# autocast and control results should be bitwise identical.
self.assertEqual(out, out_control)
if cls == "LSTM":
self.assertTrue(h_out[0].dtype is torch.float16 and h_out[1].dtype is torch.float16)
self.assertEqual(h_out[0], h_out_control[0])
self.assertEqual(h_out[1], h_out_control[1])
else:
self.assertEqual(h_out.dtype, torch.float16)
self.assertEqual(h_out, h_out_control)
for grad, grad_control in zip(grads, grads_control):
self.assertEqual(grad.half(), grad_control)
def test_autocast_cache_leak(self):
# Reported at https://github.com/pytorch/pytorch/issues/48049
# Test is used to check, if autocast recaches the same parameters
# when executed in a `torch.no_grad()` block.
linear = torch.nn.Linear(10, 10).to('cuda')
data = torch.randn(1, 10, device='cuda')
with torch.autocast('cuda', ):
with torch.no_grad():
out = linear(data)
first_iter_mem = torch.cuda.memory_allocated()
for _ in range(3):
out = linear(data)
self.assertTrue(first_iter_mem == torch.cuda.memory_allocated())
def test_autocast_checkpointing(self):
model = torch.nn.Sequential(torch.nn.Linear(8, 8),
torch.nn.Linear(8, 8),
torch.nn.Linear(8, 8)).cuda()
input = torch.rand((8, 8), device="cuda", dtype=torch.float16, requires_grad=True)
with torch.autocast('cuda', ):
output = checkpoint_sequential(model, 2, input)
self.assertTrue(output.requires_grad)
self.assertTrue(output.dtype is torch.float16)
output.sum().backward()
@slowTest
@unittest.skipIf(not TEST_LARGE_TENSOR, "not enough memory")
def test_max_large_axis(self):
x = torch.zeros(2**32, device='cuda', dtype=torch.int8)
x[-1] = 1
val, idx = x.max(0)
self.assertEqual(val, 1)
self.assertEqual(idx, x.shape[0] - 1)
@unittest.skipIf(not TEST_NUMPY, "Numpy not found")
def test_to_numpy(self):
self.assertRaises(TypeError, lambda: torch.empty(1, device="cuda").numpy())
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_capture_simple(self):
s = torch.cuda.Stream()
with torch.cuda.stream(s):
a = torch.full((1000,), 1, device="cuda")
g = torch.cuda.CUDAGraph()
torch.cuda.empty_cache()
g.capture_begin()
b = a
for _ in range(10):
b = b + 1
g.capture_end()
torch.cuda.current_stream().wait_stream(s)
g.replay()
self.assertTrue(b.sum().item() == 11000.)
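# The same capture can be written more compactly with the torch.cuda.graph
# context manager (used elsewhere in this file), which handles the side stream
# and capture_begin()/capture_end() bookkeeping, roughly:
#   g = torch.cuda.CUDAGraph()
#   with torch.cuda.graph(g):
#       b = a + 1
#   g.replay()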
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_capture_oom(self):
with self.assertRaisesRegex(RuntimeError, "out of memory"):
with torch.cuda.graph(torch.cuda.CUDAGraph()):
torch.zeros(2 ** 40, device="cuda")
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_rng_functional(self):
ops_with_kwargs = ((torch.nn.functional.dropout, {"p": 0.1}),
(torch.nn.functional.rrelu, {"training": True}),)
size = 10000
def run(op, kwargs):
a = torch.randn((size,), device="cuda", dtype=torch.float)
# Control
torch.cuda.manual_seed(5)
eager_out = a
for _ in range(6):
eager_out = op(eager_out, **kwargs)
graph_in = a.clone()
stream = torch.cuda.Stream()
stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(stream):
torch.cuda.manual_seed(5)
g = torch.cuda.CUDAGraph()
torch.cuda.empty_cache()
g.capture_begin()
graph_out = graph_in
for _ in range(2):
graph_out = op(graph_out, **kwargs)
g.capture_end()
torch.cuda.current_stream().wait_stream(stream)
# Runs a graphed->eager->graphed sequence of RNG ops.
# replay() plays 2 invocations of the op, so the sequence has 6
# invocations total, matching Control.
# replay() reads from graph_in and writes to graph_out.
g.replay()
out = op(graph_out, **kwargs)
out = op(out, **kwargs)
graph_in.copy_(out)
g.replay()
# If replay() updated RNG state correctly, graph_out
# should now hold data equal to eager_out.
try:
self.assertEqual(eager_out, graph_out)
except Exception as e:
raise RuntimeError("Failed on ", op) from e
# We hold references to all tensors used across streams up til this sync,
# so no need to call record_stream on those tensors.
torch.cuda.synchronize()
for op, kwargs in ops_with_kwargs:
run(op, kwargs)
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_rng_distributions(self):
size = 10000
input = torch.rand((size,), device="cuda", dtype=torch.float)
alloc = torch.empty((size,), device="cuda", dtype=torch.float)
# Torch ops to test with sample args (tuple) and kwargs (dict)
torch_with_args = (("bernoulli", (input.clone(),), {}),
# multinomial uses some uncapturable CUDA calls.
# TODO: reenable multinomial tests if/when the implementation is capturable.
# ("multinomial", (input.clone(), size, True), {}),
# ("multinomial", (input.clone(), size // 2, False), {}),
# TODO: reenable normal test, where std is a device
# tensor, when graph test failures are fixed
# ("normal", (input.clone() + 1, input.clone()), {}),
("normal", (input.clone() + 1, 1.0), {}),
("poisson", (input.clone(),), {}),
("rand", (size,), {"device": "cuda", "dtype": torch.float}),
("randint", (0, 3, (size,)), {"device": "cuda", "dtype": torch.float}),
("randn", (size,), {"device": "cuda", "dtype": torch.float}),)
# Tensor methods to test with sample args (tuple)
tensor_with_args = (("bernoulli_", (input.clone(),)),
("cauchy_", ()),
("exponential_", ()),
("geometric_", (0.3,)),
("log_normal_", ()),
("normal_", ()),
("random_", ()),
("uniform_", ()),)
def run(module, op, args, kwargs):
torch.cuda.manual_seed(5)
# Each path runs a dummy op to increment the state a bit before creating controls.
if (module == "torch"):
dummy = getattr(torch, op)(*args, **kwargs)
control1 = getattr(torch, op)(*args, **kwargs)
control2 = getattr(torch, op)(*args, **kwargs)
else:
dummy = alloc.clone()
control1 = alloc.clone()
control2 = alloc.clone()
getattr(dummy, op)(*args)
getattr(control1, op)(*args)
getattr(control2, op)(*args)
stream = torch.cuda.Stream()
stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(stream):
torch.cuda.manual_seed(5)
g = torch.cuda.CUDAGraph()
torch.cuda.empty_cache()
if (module == "torch"):
g.capture_begin()
t1 = getattr(torch, op)(*args, **kwargs)
t2 = getattr(torch, op)(*args, **kwargs)
g.capture_end()
else:
t1 = alloc.clone()
t2 = alloc.clone()
g.capture_begin()
getattr(t1, op)(*args)
getattr(t2, op)(*args)
g.capture_end()
torch.cuda.current_stream().wait_stream(stream)
try:
self.assertNotEqual(control1, t1)
self.assertNotEqual(control2, t2)
except Exception as e:
raise RuntimeError("Failed on " + module + "." + op) from e
# Runs a dummy op prelude, as for controls, to make sure replay()
# picks up the dummy op's state increment.
if module == "torch":
dummy = getattr(torch, op)(*args, **kwargs)
else:
dummy = alloc.clone()
getattr(dummy, op)(*args)
# Runs RNG ops that fill t1 and t2.
g.replay()
try:
self.assertEqual(control1, t1)
self.assertEqual(control2, t2)
except Exception as e:
raise RuntimeError("Failed on " + module + "." + op) from e
# We hold references to all tensors used across streams up til this sync,
# so no need to call record_stream on those tensors.
torch.cuda.synchronize()
for op_with_args in torch_with_args:
run("torch", *op_with_args)
for meth_with_args in tensor_with_args:
# Adds an empty dict for kwargs, which none of the Tensor methods use
run("Tensor", *(meth_with_args + ({},)))
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_two_successive(self):
torch.cuda.empty_cache()
size = 1000
kSmallBuffer = 2097152
def func_with_temps(t, val):
x = t.clone() + val
y = t.clone() + val
return x + y
s = torch.cuda.Stream()
for share_mem in ("Don't share", "via pool()", "via graph_pool_handle()"):
g0 = torch.cuda.CUDAGraph()
g1 = torch.cuda.CUDAGraph()
a = torch.ones((size,), device="cuda")
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
g0_args = (torch.cuda.graph_pool_handle(),) if share_mem == "via graph_pool_handle()" else ()
g0.capture_begin(*g0_args)
b = a.clone()
for _ in range(5):
b = func_with_temps(b, 1)
g0.capture_end()
g1_args = (g0.pool(),) if share_mem == "via pool()" else g0_args
g1.capture_begin(*g1_args)
for _ in range(5):
b = func_with_temps(b, 1)
g1.capture_end()
torch.cuda.current_stream().wait_stream(s)
# mixes unrelated eager ops with replays
c = a.clone()
for _ in range(2):
c = func_with_temps(c, 3)
g0.replay()
for _ in range(2):
c = func_with_temps(c, 3)
g1.replay()
for _ in range(2):
c = func_with_temps(c, 3)
self.assertEqual(b.sum().item(), size * 3070)
self.assertEqual(c.sum().item(), size * 442)
if share_mem != "Don't share":
self.assertEqual(reserved_no_sharing - torch.cuda.memory_stats()["reserved_bytes.all.current"],
kSmallBuffer)
else:
reserved_no_sharing = torch.cuda.memory_stats()["reserved_bytes.all.current"]
del a, b, c, g0, g1
# Tensors used across streams (a and b) were held until just now, so no need to call record_stream on them.
torch.cuda.synchronize()
torch.cuda.empty_cache()
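# Sharing a mempool between successively captured graphs (via g0.pool() or a
# handle from torch.cuda.graph_pool_handle()) lets the second capture reuse the
# first capture's reserved memory instead of reserving its own, which is what
# the kSmallBuffer delta check above verifies.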
@unittest.skip("Temporarily disabled due to a graphs bug in libcuda.so, " +
"see https://github.com/pytorch/pytorch/pull/57556")
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_concurrent_replay(self):
torch.cuda.empty_cache()
size = 1000000 # largeish to help expose race conditions
def func_with_temps(t, val):
x = t.clone() + val
y = t.clone() + val
return x + y
s = torch.cuda.Stream()
for share_mem in ("Don't share", "via pool()", "via graph_pool_handle()"):
g0 = torch.cuda.CUDAGraph()
g1 = torch.cuda.CUDAGraph()
s0 = torch.cuda.Stream()
s1 = torch.cuda.Stream()
a = torch.ones((size,), device="cuda")
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
g0_args = (torch.cuda.graph_pool_handle(),) if share_mem == "via graph_pool_handle()" else ()
g0.capture_begin(*g0_args)
b = a.clone()
for _ in range(5):
b = func_with_temps(b, 1)
g0.capture_end()
g1_args = (g0.pool(),) if share_mem == "via pool()" else g0_args
g1.capture_begin(*g1_args)
c = a.clone()
for _ in range(5):
c = func_with_temps(c, 2)
g1.capture_end()
# To reproduce data corruption, I need g0 and g1's kernels to run concurrently.
# But replay() (especially cudaGraphLaunch) can incur significant CPU overhead.
# The following pattern helps align device-side execution of g0 and g1's kernels.
torch.cuda.synchronize()
with torch.cuda.stream(s0):
torch.cuda._sleep(1000000)
s1.wait_stream(s0)
g0.replay()
with torch.cuda.stream(s1):
g1.replay()
torch.cuda.current_stream().wait_stream(s0)
torch.cuda.current_stream().wait_stream(s1)
if share_mem != "Don't share":
# Confirms concurrent replays using the same mempool corrupted each other.
self.assertNotEqual(b.sum().item(), size * 94)
self.assertNotEqual(c.sum().item(), size * 156)
else:
# Confirms concurrent replays using different mempools did not corrupt each other.
self.assertEqual(b.sum().item(), size * 94)
self.assertEqual(c.sum().item(), size * 156)
del a, b, c, g0, g1
# Tensors used across streams (a, b, c) were held until just now, so no need to call record_stream on them.
torch.cuda.synchronize()
torch.cuda.empty_cache()
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_three_successive(self):
torch.cuda.empty_cache()
size = 1000
s = torch.cuda.Stream()
for share_mem in ("Don't share", "via pool()", "via graph_pool_handle()"):
a = torch.ones((size,), device="cuda")
g0 = torch.cuda.CUDAGraph()
g1 = torch.cuda.CUDAGraph()
g2 = torch.cuda.CUDAGraph()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
g0_args = (torch.cuda.graph_pool_handle(),) if share_mem == "via graph_pool_handle()" else ()
g0.capture_begin(*g0_args)
b = a.clone()
c = b + 1
d = b + 2
g0.capture_end()
args = (g0.pool(),) if share_mem == "via pool()" else g0_args
g1.capture_begin(*args)
e = c + 3
del c
g1.capture_end()
g2.capture_begin(*args)
f = d + 4
g2.capture_end()
torch.cuda.current_stream().wait_stream(s)
# Tests that replaying in capture order is valid
g0.replay()
g1.replay()
g2.replay()
self.assertEqual(e.sum().item(), size * 5)
self.assertEqual(f.sum().item(), size * 7)
# Tests that replaying as g0, g2, g1 is only valid if they don't share a pool
g0.replay()
g2.replay()
g1.replay()
# If the captures share a mempool, g2's capture should have reused c's memory for f. We replayed g2 then g1,
# so we expect g1's captured "e = c + 3" to have mistakenly filled e with "f's vals + 3".
self.assertEqual(e.sum().item(), size * (7 + 3) if share_mem != "Don't share" else size * 5)
self.assertEqual(f.sum().item(), size * 7)
del a, b, d, e, f, g0, g1, g2
# Tensors used across streams (a, e, f) were held until just now, so no need to call record_stream on them.
torch.cuda.synchronize()
torch.cuda.empty_cache()
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_memory_stats_and_use_result_after_destroy_graph(self):
kSmallSize = 1048576
kSmallBuffer = 2097152
kLargeBuffer = 20971520
kMinLargeAlloc = 10485760
kRoundLarge = 2097152
elem = 4
# this was annoying to write but stresses the expectations pretty rigorously
cases = ((512 // elem, 1, kSmallBuffer, kSmallBuffer, "small_pool"),
(kSmallSize // elem, 2, 2 * kSmallBuffer, kSmallBuffer, "small_pool"),
((kSmallSize + 512) // elem, 1, kLargeBuffer, kLargeBuffer, "large_pool"),
((kMinLargeAlloc - 512) // elem, 2, 2 * kLargeBuffer, kLargeBuffer, "large_pool"),
((kMinLargeAlloc + 512) // elem, 3,
3 * (kRoundLarge * ((kMinLargeAlloc + 512 + kRoundLarge - 1) // kRoundLarge)),
kRoundLarge * ((kMinLargeAlloc + 512 + kRoundLarge - 1) // kRoundLarge),
"large_pool"),)
stats_to_check = ("segment.",
"reserved_bytes.",
"active.",
"active_bytes.")
gc.collect()
torch.cuda.empty_cache()
s = torch.cuda.Stream()
for (numel,
delta_cudaMallocs,
delta_cudaMalloc_bytes,
delta_cudaMalloc_bytes_post_del_g,
pool_string) in cases:
if pool_string == "small_pool":
delta_active_blocks = 2 # one from "b" plus a sneaky one from CUDAGraph's one-element rng offset holder
delta_active_bytes = numel * elem + 512 # + 512 for CUDAGraph's rng offset holder
else:
delta_active_blocks = 1 # We only check the large pool, which isn't affected by rng offset holder
delta_active_bytes = numel * elem
g = torch.cuda.CUDAGraph()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
# Allocation stat estimates assume input is created on the same stream as capture_begin()
# (in other words, the same stream silo as the rng offset holder, which is not allocated from the
# capture's private pool).
a = torch.ones((numel,), device="cuda")
precapture_stats = torch.cuda.memory_stats()
g.capture_begin()
b = a.clone()
for _ in range(5):
b = b.clone() + 1
g.capture_end()
torch.cuda.current_stream().wait_stream(s)
gc.collect()
postcapture_stats = torch.cuda.memory_stats()
expecteds = (delta_cudaMallocs,
delta_cudaMalloc_bytes,
delta_active_blocks,
delta_active_bytes)
# Double checks replay and stats before and after a call to empty_cache
for i in range(2):
for stat, expected in zip(stats_to_check, expecteds):
stat = stat + pool_string + ".current"
current = postcapture_stats[stat] - precapture_stats[stat]
self.assertEqual(current, expected, "Pre to post capture delta of " +
stat + " = {}, expected = {}, numel = {}".format(current, expected, numel))
g.replay()
self.assertEqual(b.sum().item(), 6 * numel)
if i == 0:
torch.cuda.empty_cache()
del g
gc.collect()
torch.cuda.empty_cache()
postdel_stats = torch.cuda.memory_stats()
# Uses graph result b after graph has been deleted
self.assertEqual(b.sum().item(), 6 * numel)
# b should be the only live reference remaining from the graph's private pool
expecteds = (1, delta_cudaMalloc_bytes_post_del_g, 1, numel * elem)
for stat, expected in zip(stats_to_check, expecteds):
stat = stat + pool_string + ".current"
current = postdel_stats[stat] - precapture_stats[stat]
self.assertEqual(current, expected, "Pre capture to post graph delete delta of " +
stat + " = {}, expected = {}, numel = {}".format(current, expected, numel))
# del a, b before the next case is essential, otherwise overwriting a and b in the next case
# can throw off its allocation/deallocation counts.
del a, b
# Tensors used across streams (a and b) were held until just now, so no need to call record_stream on them.
torch.cuda.synchronize()
torch.cuda.empty_cache()
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_record_stream(self):
# Makes sure graph capture defers attempting to reclaim allocations used across streams. See
# "Q. Why skip process_events if a capture might be underway?" in c10/cuda/CUDACachingAllocator.cpp
torch.cuda.empty_cache()
potential_problem = torch.zeros((3,), device="cuda")
a = torch.zeros((3,), device="cuda")
s0 = torch.cuda.Stream()
s1 = torch.cuda.Stream()
s2 = torch.cuda.Stream()
g = torch.cuda.CUDAGraph()
torch.cuda.synchronize()
with torch.cuda.stream(s0):
potential_problem.record_stream(s0)
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
potential_problem.fill_(1.)
del potential_problem
with torch.cuda.stream(s1):
g.capture_begin()
# potential_problem's allocation should still be outstanding. if DeviceCachingAllocator::malloc
# mistakenly calls process_events, it will trigger cudaEventQueries on potential_problem's end-of-life
# event, which will cause the capture to error.
b = a.clone()
# Let's also see what happens if we record_stream on a tensor during capture.
s2.wait_stream(s1)
with torch.cuda.stream(s2):
b.fill_(1.)
b.record_stream(s2) # dummy record_stream
del b
s1.wait_stream(s2)
g.capture_end()
torch.cuda.synchronize()
# A dummy allocation triggers process_events, which should successfully process b's end-of-life event.
c = torch.zeros((3,), device="cuda")
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
# If this test is the first in the process to try cudnn rnns with dropout, it'll initialize
# DropoutState's long-lived internal buffer. Calling code perceives this (correct) behavior
# as a memory leak unless we skip the leak check.
@skipCUDAMemoryLeakCheckIf(True)
def test_graph_cudnn_dropout(self):
# Tests the interaction of cuda graph capture with DropoutState's syncs in ATen/native/cudnn/RNN.cpp.
# In particular, if user runs a sequence of captured and noncaptured cudnn rnns, DropoutState should
# avoid syncing noncapturing streams with captured events or vice versa.
torch.cuda.empty_cache()
model = torch.nn.LSTM(512, 512, 2, dropout=0.5).cuda()
x = torch.ones(100, 192, 512, device="cuda")
y = model(x)
g = torch.cuda.CUDAGraph()
s = torch.cuda.Stream()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
g.capture_begin()
y = model(x)
g.capture_end()
torch.cuda.current_stream().wait_stream(s)
y = model(x)
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_grad_scaling(self):
torch.cuda.empty_cache()
scaler = torch.cuda.amp.GradScaler(init_scale=4.)
g = torch.cuda.CUDAGraph()
s = torch.cuda.Stream()
weight = torch.ones((100,), device="cuda", requires_grad=True)
opt = torch.optim.SGD([weight], lr=0.1)
static_input = torch.ones_like(weight)
static_grad = torch.ones_like(weight)
# warmup
s = torch.cuda.Stream()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
loss = (weight.half() * static_input).sum()
scaler.scale(loss).backward()
torch.cuda.current_stream().wait_stream(s)
opt.zero_grad(set_to_none=True)
# capture
with torch.cuda.graph(g):
loss = (weight.half() * static_input).sum()
scaler.scale(loss).backward()
input_vals = [5, 20000, 5, 40000]
# If the scale gets updated properly, these are the scale, growth tracker,
# and grad values we expect.
expected_scales = [4, 2, 2, 1]
expected_growth_trackers = [1, 0, 1, 0]
expected_grad_vals = [5 * 4, float("inf"), 5 * 2, float("inf")]
for data, scale, growth_tracker, grad_val in zip(input_vals,
expected_scales,
expected_growth_trackers,
expected_grad_vals):
static_input.fill_(data)
g.replay()
self.assertEqual(weight.grad, torch.full_like(weight.grad, grad_val))
scaler.step(opt)
scaler.update()
self.assertEqual(scaler._scale, scale)
self.assertEqual(scaler._growth_tracker, growth_tracker)
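# Workflow exercised above for combining CUDA graphs with GradScaler: only the
# forward pass and scaler.scale(loss).backward() are captured; scaler.step(),
# scaler.update() and the optimizer stay outside the graph so scale
# growth/backoff and inf/nan skipping still happen eagerly between replays.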
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_make_graphed_callables(self):
torch.manual_seed(5)
torch.cuda.manual_seed(5)
N, D_in, H, D_out = 640, 4096, 2048, 1024
models = []
for _ in range(2):
model_section1 = torch.nn.Sequential(torch.nn.Linear(D_in, H),
torch.nn.Dropout(p=0.1)).cuda()
model_section2 = torch.nn.Sequential(torch.nn.Linear(H, D_out),
torch.nn.Dropout(p=0.2)).cuda()
models.append(torch.nn.Sequential(model_section1, model_section2))
model_graphed = models[0]
model_control = models[1]
model_graphed.load_state_dict(model_control.state_dict())
opt_graphed = torch.optim.SGD(model_graphed.parameters(), lr=0.1)
opt_control = torch.optim.SGD(model_control.parameters(), lr=0.1)
x = torch.randn(N, D_in, device='cuda')
h = torch.randn(N, H, device='cuda', requires_grad=True)
y_pred = torch.randn(N, D_out, device='cuda', requires_grad=True)
y = torch.randn(N, D_out, device='cuda')
loss_fn_control = torch.nn.functional.mse_loss
relu_control = torch.nn.functional.relu
# This is a good stress test. It graphs four callables: two Modules and two python functions.
model_graphed[0], model_graphed[1], relu_graphed, loss_fn_graphed = \
torch.cuda.make_graphed_callables((model_graphed[0], model_graphed[1], relu_control, loss_fn_control),
((x,), (h,), (y_pred,), (y_pred, y)))
real_inputs = [torch.rand_like(x) for _ in range(10)]
real_targets = [torch.rand_like(y) for _ in range(10)]
for m, opt, relu, loss_fn in zip((model_graphed, model_control),
(opt_graphed, opt_control),
(relu_graphed, relu_control),
(loss_fn_graphed, loss_fn_control)):
# Resets RNG states before iterations for graphed and ungraphed models,
# so dropout math should be bitwise identical for both.
torch.manual_seed(5)
torch.cuda.manual_seed(5)
for data, target in zip(real_inputs, real_targets):
opt.zero_grad(set_to_none=True)
y_pred = m(data)
y_pred = relu(y_pred)
loss = loss_fn(y_pred, target)
loss.backward()
opt.step()
for p, pc in zip(model_graphed.parameters(), model_control.parameters()):
self.assertEqual(p, pc)
# We graphed the models in training mode. Eval should still run ungraphed.
model_graphed.eval()
model_control.eval()
self.assertEqual(model_graphed(real_inputs[0]), model_control(real_inputs[0]))
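# torch.cuda.make_graphed_callables takes a tuple of callables plus a matching
# tuple of sample-argument tuples and returns graphed versions usable as
# drop-in replacements during training; as checked above, modules switched to
# eval() fall back to ordinary (ungraphed) execution.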
def test_batch_norm_gather_stats(self):
input = torch.randn(1, 3, 3, 3, device='cuda')
mean, invstd = torch.batch_norm_gather_stats(
input, mean=torch.ones(2, 3, device='cuda'), invstd=torch.ones(2, 3, device='cuda'),
running_mean=None, running_var=None , momentum=.1, eps=1e-5, count=2
)
self.assertEqual(mean, torch.ones(3, device='cuda'))
self.assertEqual(invstd, torch.ones(3, device='cuda'))
@unittest.skipIf(not TEST_MULTIGPU, "Test needs multiple GPUs")
def test_cuda_device_memory_allocated(self):
from torch.cuda import memory_allocated
device_count = torch.cuda.device_count()
current_alloc = [memory_allocated(idx) for idx in range(device_count)]
x = torch.ones(10, device="cuda:0")
self.assertTrue(memory_allocated(0) > current_alloc[0])
self.assertTrue(all(memory_allocated(torch.cuda.device(idx)) == current_alloc[idx] for idx in range(1, device_count)))
def test_matmul_memory_use(self):
def get_max_used():
torch.cuda.synchronize()
val = torch.cuda.max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
return val
a = torch.rand(1, 32, 32, device="cuda")
b = torch.rand(24, 32, 1, device="cuda")
get_max_used()
torch.matmul(a, b)
matmul_mem = get_max_used()
a = a.expand(24, 32, 32)
torch.matmul(a, b)
matmul_expand_mem = get_max_used()
torch.bmm(a, b)
bmm_mem = get_max_used()
self.assertEqual(matmul_expand_mem, matmul_mem)
self.assertEqual(bmm_mem, matmul_mem)
class TestCudaComm(TestCase):
def _test_broadcast(self, input):
if not TEST_MULTIGPU:
raise unittest.SkipTest("only one GPU detected")
# test regular
results = comm.broadcast(input, (0, 1))
for i, t in enumerate(results):
self.assertEqual(t.get_device(), i)
self.assertEqual(t, input)
if input.is_cuda and input.get_device() == i: # test not copying on same device
self.assertEqual(t.data_ptr(), input.data_ptr())
# test out=
for inplace in [True, False]:
if inplace:
outputs = [torch.empty_like(input, device=0), torch.empty_like(input, device=1)]
else:
outputs = [input.cuda(0), torch.empty_like(input, device=1)]
results = comm.broadcast(input, out=outputs)
for r, o in zip(results, outputs):
self.assertIs(r, o)
for i, t in enumerate(results):
self.assertEqual(t.get_device(), i)
self.assertEqual(t, input)
# test error msg
with self.assertRaisesRegex(RuntimeError, r"Exactly one of 'devices' and 'out'"):
comm.broadcast(input, (0, 1), out=outputs)
with self.assertRaisesRegex(RuntimeError,
r"Expected all output tensors to be CUDA tensors, but output tensor at index 1"):
comm.broadcast(input, out=[input.cuda(0), input.cpu()])
with self.assertRaisesRegex(RuntimeError,
r"Expected all output tensors to have same shape as the source .+ at index 1"):
comm.broadcast(input, out=[input.cuda(0), input.cuda(1).unsqueeze(0)])
def test_broadcast_cpu(self):
self._test_broadcast(torch.randn(5, 5))
def test_broadcast_gpu(self):
self._test_broadcast(torch.randn(5, 5).cuda())
def _test_broadcast_coalesced(self, tensors, buffer_size):
b_tensors = [comm.broadcast(t, (0, 1)) for t in tensors]
for (_, bt), t in zip(b_tensors, tensors):
self.assertEqual(bt.get_device(), 1)
self.assertEqual(bt, t)
self.assertIsInstance(bt, type(t))
bc_tensors = comm.broadcast_coalesced(tensors, (0, 1), buffer_size=buffer_size)
bc_tensors_t = list(zip(*bc_tensors))
self.assertEqual(b_tensors, bc_tensors_t)
for (_, bt), (_, bct) in zip(b_tensors, bc_tensors_t):
self.assertEqual(bt.get_device(), bct.get_device())
self.assertIsInstance(bct, type(bt))
# check that tensors on device[0] are returned as-is
for out_tensors in (b_tensors, bc_tensors_t):
for inp_t, (out_t, _) in zip(tensors, out_tensors):
self.assertIs(inp_t, out_t)
# check that the tensors not on device[0] have different version counters
# NOTE [ Version Counter in comm.*_coalesced ]
versions = [t._version for _, t in bc_tensors_t]
for old_version, (_, t) in zip(versions, bc_tensors_t):
self.assertEqual(t._version, old_version)
t.zero_()
self.assertEqual(t._version, old_version + 1)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
# Note: fails sometimes on the CI, passes on dual gfx906
def test_broadcast_coalesced(self):
numel = 5
num_bytes = numel * 8
tensors = [
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 1, 2, 3),
torch.randn(numel).long().cuda(),
torch.randn(numel).cuda(),
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 10, 2, 3),
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 5, 2, 3),
make_sparse_tensor(torch.cuda.sparse.LongTensor, 7, 3, 3),
make_sparse_tensor(torch.cuda.sparse.FloatTensor, 2, 2, 3),
torch.randn(numel).long().cuda(),
torch.randn(numel).long().cuda(),
make_sparse_tensor(torch.cuda.sparse.LongTensor, 3, 2, 7),
torch.randn(numel * 2).int().cuda(), # int is 2x shorter
torch.randn(numel).cuda(),
]
self._test_broadcast_coalesced(tensors, num_bytes * 5 // 2)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_broadcast_coalesced_dense_only(self):
numel = 5
num_bytes = numel * 8
tensors = [
torch.randn(numel).long().cuda(),
torch.randn(numel).cuda(),
torch.randn(numel).long().cuda(),
torch.randn(numel).long().cuda(),
torch.randn(numel * 2).int().cuda(), # int is 2x shorter
torch.randn(numel).cuda(),
]
self._test_broadcast_coalesced(tensors, num_bytes * 5 // 2)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_broadcast_coalesced_empty_tensors(self):
tensors = [
torch.tensor([]).byte().cuda(),
torch.randn(5).cuda(),
torch.randn(5).double().cuda()
]
self._test_broadcast_coalesced(tensors, 256)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_reduce_add(self):
x = torch.randn(5, 5)
y = torch.randn(5, 5)
x_cuda = x.cuda(0)
y_cuda = y.cuda(1)
result = comm.reduce_add((x_cuda, y_cuda))
self.assertEqual(result.get_device(), 0)
self.assertEqual(result.cpu(), x + y)
def _test_reduce_add_coalesced(self, tensors, buffer_size):
dup_tensors = [tensors, [t.cuda(1) for t in tensors]]
r_tensors = [comm.reduce_add(t) for t in zip(*dup_tensors)]
for r, t in zip(r_tensors, tensors):
self.assertEqualTypeString(r, t)
self.assertEqual(r.coalesce() if r.is_sparse else r, t * 2)
rc_tensors = comm.reduce_add_coalesced(dup_tensors, buffer_size=buffer_size)
self.assertEqual(r_tensors, rc_tensors)
for r, rc in zip(r_tensors, rc_tensors):
self.assertEqualTypeString(rc, r)
# Since we have both cuda:0 and cuda:1 inputs, the outputs must be new.
# We can check that they have different version counters.
# NOTE [ Version Counter in comm.*_coalesced ]
versions = [t._version for t in rc_tensors]
for old_version, t in zip(versions, rc_tensors):
self.assertEqual(t._version, old_version)
t.zero_()
self.assertEqual(t._version, old_version + 1)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_reduce_add_coalesced(self):
numel = 5
num_bytes = numel * 8
tensors = [
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 1, 2, 3),
torch.randn(numel).long().cuda(),
torch.randn(numel).cuda(),
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 10, 2, 3),
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 5, 2, 3),
make_sparse_tensor(torch.cuda.sparse.LongTensor, 7, 3, 3),
make_sparse_tensor(torch.cuda.sparse.FloatTensor, 2, 2, 3),
torch.randn(numel).long().cuda(),
torch.randn(numel).long().cuda(),
make_sparse_tensor(torch.cuda.sparse.LongTensor, 3, 2, 7),
torch.randn(numel * 2).int().cuda(), # int is 2x shorter
torch.randn(numel).cuda(),
]
self._test_reduce_add_coalesced(tensors, num_bytes * 5 // 2)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_reduce_add_coalesced_dense_only(self):
numel = 5
num_bytes = numel * 8
tensors = [
torch.randn(numel).long().cuda(),
torch.randn(numel).cuda(),
torch.randn(numel).long().cuda(),
torch.randn(numel).long().cuda(),
torch.randn(numel * 2).int().cuda(), # int is 2x shorter
torch.randn(numel).cuda(),
]
self._test_reduce_add_coalesced(tensors, num_bytes * 5 // 2)
def _test_scatter(self, input, chunk_sizes=None, dim=0):
if not TEST_MULTIGPU:
raise unittest.SkipTest("only one GPU detected")
if chunk_sizes is None:
ref_chunk_sizes = tuple(repeat(input.size(dim) // 2, 2))
else:
ref_chunk_sizes = chunk_sizes
# test regular
result = comm.scatter(input, (0, 1), chunk_sizes, dim)
self.assertEqual(len(result), 2)
chunk_start = 0
for i, r in enumerate(result):
chunk_end = chunk_start + ref_chunk_sizes[i]
index = [slice(None, None) for _ in range(input.dim())]
index[dim] = slice(chunk_start, chunk_end)
self.assertEqual(r, input[tuple(index)], atol=0, rtol=0)
chunk_start = chunk_end
if r.device == input.device:
self.assertEqual(r.data_ptr(), input.data_ptr()) # for target @ same device, a view should be returned
# test out
out = [torch.empty_like(t) for t in result]
result = comm.scatter(input, dim=dim, out=out)
self.assertEqual(len(result), 2)
chunk_start = 0
for i, r in enumerate(result):
self.assertIs(r, out[i])
chunk_end = chunk_start + ref_chunk_sizes[i]
index = [slice(None, None) for _ in range(input.dim())]
index[dim] = slice(chunk_start, chunk_end)
self.assertEqual(r, input[tuple(index)], atol=0, rtol=0)
chunk_start = chunk_end
# test error msg
if chunk_sizes is not None:
with self.assertRaisesRegex(RuntimeError, r"Expected devices and chunk_sizes to be of same length"):
comm.scatter(input, [0 for _ in range(len(chunk_sizes) + 1)], dim=dim, chunk_sizes=chunk_sizes)
with self.assertRaisesRegex(RuntimeError, r"'devices' must not be specified"):
comm.scatter(input, (0, 1), dim=dim, out=out)
with self.assertRaisesRegex(RuntimeError, r"Expected at least one device to scatter to"):
comm.scatter(input, (), dim=dim)
with self.assertRaisesRegex(RuntimeError, r"Expected at least one output tensor to scatter to"):
comm.scatter(input, dim=dim, out=[])
with self.assertRaisesRegex(RuntimeError,
r"Expected all output tensors to be CUDA tensors, but output tensor at index 0"):
comm.scatter(input, dim=dim, out=([out[0].cpu()] + out[1:]))
with self.assertRaisesRegex(RuntimeError, r"Output tensor at index 0 has incorrect shape"):
comm.scatter(input, dim=dim, out=([out[0].unsqueeze(0)] + out[1:]))
with self.assertRaisesRegex(RuntimeError, r"Total size for output tensors along scatter dim \d+ does not match"):
index = [slice(None, None) for _ in range(input.dim())]
index[dim] = slice(1, None)
comm.scatter(input, dim=dim, out=([out[0][tuple(index)]] + out[1:]))
def test_scatter_cpu(self):
self._test_scatter(torch.randn(4, 4), dim=0)
def test_scatter_cpu_dim(self):
self._test_scatter(torch.randn(4, 4), dim=1)
def test_scatter_cpu_neg_dim(self):
self._test_scatter(torch.randn(4, 4), dim=-2)
def test_scatter_cpu_sizes(self):
self._test_scatter(torch.randn(6, 4), chunk_sizes=(2, 4))
def test_scatter_gpu(self):
self._test_scatter(torch.randn(4, 4).cuda(), dim=0)
def test_scatter_gpu_dim(self):
self._test_scatter(torch.randn(4, 4).cuda(), dim=1)
def test_scatter_gpu_neg_dim(self):
self._test_scatter(torch.randn(4, 4).cuda(), dim=-2)
def test_scatter_gpu_sizes(self):
self._test_scatter(torch.randn(6, 4).cuda(), chunk_sizes=(2, 4))
def _test_gather(self, dim):
if not TEST_MULTIGPU:
raise unittest.SkipTest("only one GPU detected")
x = torch.randn(2, 5, device=0)
y = torch.randn(2, 5, device=1)
expected_size = list(x.size())
expected_size[dim] += y.size(dim)
expected_size = torch.Size(expected_size)
destinations = [None, torch.device('cuda:0'), torch.device('cpu')]
if torch.cuda.device_count() > 2:
destinations.append(torch.device('cuda:2'))
with torch.cuda.device(1):
for destination in destinations:
if destination is None:
expected_device = torch.device('cuda', torch.cuda.current_device())
else:
expected_device = destination
for use_out in [True, False]:
if use_out:
out = torch.empty(expected_size, device=expected_device)
result = comm.gather((x, y), dim, out=out)
self.assertIs(out, result)
else:
result = comm.gather((x, y), dim, destination=destination)
self.assertEqual(result.device, expected_device)
self.assertEqual(result.size(), expected_size)
index = [slice(None, None), slice(None, None)]
index[dim] = slice(0, x.size(dim))
self.assertEqual(result[tuple(index)], x)
index[dim] = slice(x.size(dim), x.size(dim) + y.size(dim))
self.assertEqual(result[tuple(index)], y)
# test error msg
with self.assertRaisesRegex(RuntimeError, r"'destination' must not be specified"):
comm.gather((x, y), dim, destination='cpu', out=torch.empty(expected_size, device='cpu'))
with self.assertRaisesRegex(RuntimeError, r"Expected at least one tensor to gather from"):
comm.gather(())
with self.assertRaisesRegex(RuntimeError, r"Expected all input tensors to be CUDA tensors, "):
comm.gather((x.cpu(), y))
with self.assertRaisesRegex(RuntimeError, r"Expected all input tensors to have the same number of dimensions"):
comm.gather((x, y.unsqueeze(0)))
with self.assertRaisesRegex(RuntimeError, r"Input tensor at index 1 has invalid shape"):
if dim in [0, -2]:
comm.gather((x, y[:, 1:]), dim=dim)
elif dim in [1, -1]:
comm.gather((x, y[1:, :]), dim=dim)
def test_gather(self):
self._test_gather(0)
def test_gather_dim(self):
self._test_gather(1)
def test_gather_neg_dim(self):
self._test_gather(-1)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_memory_format_scatter_gather(self):
nhwc = torch.randn((10, 3, 32, 32), device='cpu').contiguous(memory_format=torch.channels_last)
results = torch.cuda.comm.scatter(nhwc, (0, 1), None, 0)
for result in results:
self.assertFalse(result.is_contiguous())
self.assertTrue(result.is_contiguous(memory_format=torch.channels_last))
gathered = torch.cuda.comm.gather(results)
self.assertTrue(gathered.is_contiguous(memory_format=torch.channels_last))
def test_matmul_device_mismatch(self):
cpu = torch.rand((10, 10))
cuda = cpu.cuda()
with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
cpu @ cuda
with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
cuda @ cpu
for s, m1, m2 in product((cpu, cuda), repeat=3):
if s.device == m1.device == m2.device:
torch.addmm(s, m1, m2)
else:
with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
torch.addmm(s, m1, m2)
@unittest.skipIf(not TEST_MULTIGPU, "Test needs multiple GPUs")
def test_scatter_namedtuple(self):
# tests ability to scatter namedtuples and retrieve a list where each
# element is of the expected namedtuple type.
fields = ("a", "b")
TestNamedTupleInput_0 = collections.namedtuple("NamedTuple", fields)
num_gpus = torch.cuda.device_count()
a = torch.rand(num_gpus * 2, device=0)
b = torch.rand(num_gpus * 2, device=0)
a_tensors_for_gpu = [a[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]
b_tensors_for_gpu = [b[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]
inp = TestNamedTupleInput_0(a, b)
target_gpus = [torch.device(i) for i in range(num_gpus)]
scatter_out = scatter_gather.scatter(inp, target_gpus)
for i, x in enumerate(scatter_out):
self.assertTrue(isinstance(x, type(inp)))
self.assertEqual(x._fields, fields)
expected_a = a_tensors_for_gpu[i]
expected_b = b_tensors_for_gpu[i]
self.assertEqual(expected_a, x.a)
self.assertEqual(expected_b, x.b)
class TestNamedTupleInput_1(NamedTuple):
a: torch.Tensor
b: torch.Tensor
a = torch.rand(num_gpus * 2, device=0)
b = torch.rand(num_gpus * 2, device=0)
a_tensors_for_gpu = [a[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]
b_tensors_for_gpu = [b[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]
inp = TestNamedTupleInput_1(a, b)
scatter_out = scatter_gather.scatter(inp, target_gpus)
for i, x in enumerate(scatter_out):
self.assertTrue(isinstance(x, type(inp)))
self.assertEqual(x._fields, fields)
expected_a = a_tensors_for_gpu[i]
expected_b = b_tensors_for_gpu[i]
self.assertEqual(expected_a, x.a)
self.assertEqual(expected_b, x.b)
@unittest.skipIf(not TEST_MULTIGPU, "Test needs multiple GPUs")
def test_gather_namedtuple(self):
# tests ability to gather a list of namedtuples and return a namedtuple where each
# element is of the expected tensor type.
fields = ['a', 'b']
TestNamedTupleInput_0 = collections.namedtuple('NamedTuple', fields)
num_gpus = torch.cuda.device_count()
a = torch.rand(num_gpus * 2, device=0)
b = torch.rand(num_gpus * 2, device=1)
out1 = TestNamedTupleInput_0(a, b)
a = torch.rand(num_gpus * 2, device=1)
b = torch.rand(num_gpus * 2, device=0)
out2 = TestNamedTupleInput_0(a, b)
outputs = [out1, out2]
out = scatter_gather.gather(outputs, 'cpu') # test on CPU
for i, x in enumerate(out):
self.assertTrue(isinstance(x, type(out2[-1]))) # x must be a tensor
cat = torch.cat((outputs[0][i].to('cpu'), outputs[1][i].to('cpu')))
self.assertTrue(torch.equal(x, cat))
out = scatter_gather.gather(outputs, 0) # test on GPU
for i, x in enumerate(out):
self.assertTrue(isinstance(x, type(out2[-1])))
cat = torch.cat((outputs[0][i].to(0), outputs[1][i].to(0)))
self.assertTrue(torch.equal(x, cat))
class TestNamedTupleInput_1(NamedTuple):
a: torch.Tensor
b: torch.Tensor
a = torch.rand(num_gpus * 2, device=0)
b = torch.rand(num_gpus * 2, device=1)
out1 = TestNamedTupleInput_1(a, b)
a = torch.rand(num_gpus * 2, device=1)
b = torch.rand(num_gpus * 2, device=0)
out2 = TestNamedTupleInput_1(a, b)
outputs = [out1, out2]
out = scatter_gather.gather(outputs, 0) # test on GPU
for i, x in enumerate(out):
self.assertTrue(isinstance(x, type(out2[-1])))
cat = torch.cat((outputs[0][i].to(0), outputs[1][i].to(0)))
self.assertTrue(torch.equal(x, cat))
out = scatter_gather.gather(outputs, 'cpu') # test on CPU
for i, x in enumerate(out):
self.assertTrue(isinstance(x, type(out2[-1])))
cat = torch.cat((outputs[0][i].to('cpu'), outputs[1][i].to('cpu')))
self.assertTrue(torch.equal(x, cat))
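# --- Editor's note: illustrative sketch, not part of the original test file. ---
# The tests above exercise torch.cuda.comm.scatter/gather through their edge
# cases; a minimal round-trip, using the module-level `torch`/`comm` imports,
# assuming at least two visible CUDA devices, and with an invented function
# name, looks like this:
def _example_comm_scatter_gather_roundtrip():
    full = torch.randn(4, 4, device='cuda:0')
    # split along dim 0, one chunk per device
    chunks = comm.scatter(full, devices=(0, 1), dim=0)
    # concatenate the chunks back together on device 0
    gathered = comm.gather(chunks, dim=0, destination=torch.device('cuda:0'))
    assert torch.equal(gathered, full)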
if __name__ == '__main__':
run_tests()
|
test_multiprocessing.py
|
# Owner(s): ["module: multiprocessing"]
import contextlib
import gc
import os
import sys
import time
import unittest
import copy
from sys import platform
import torch
import torch.cuda
import torch.multiprocessing as mp
import torch.utils.hooks
from torch.nn import Parameter
from torch.testing._internal.common_utils import (TestCase, run_tests, IS_WINDOWS, NO_MULTIPROCESSING_SPAWN, TEST_WITH_ASAN,
load_tests, slowTest, TEST_WITH_TSAN, TEST_WITH_ROCM)
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
TEST_REPEATS = 30
HAS_SHM_FILES = os.path.isdir('/dev/shm')
TEST_CUDA_IPC = torch.cuda.is_available() and \
sys.platform != 'darwin' and \
sys.platform != 'win32'
TEST_MULTIGPU = TEST_CUDA_IPC and torch.cuda.device_count() > 1
class SubProcess(mp.Process):
def __init__(self, tensor):
super(SubProcess, self).__init__()
self.tensor = tensor
self.daemon = True
def run(self):
self.tensor.add_(3)
def _test_cuda_ipc_deadlock_actor(queue, iterations):
for i in range(iterations):
if not queue.empty():
queue.get()
time.sleep(.01)
def _test_cuda_ipc_deadlock_learner(queue, iterations):
net = torch.nn.LSTM(1, 1).cuda()
for i in range(iterations):
if not queue.full():
queue.put(copy.deepcopy(net.state_dict()))
time.sleep(.01)
def simple_fill(queue, event):
data = queue.get()
data[0][:] = 4
event.set()
def simple_pool_fill(tensor):
tensor.fill_(4)
return tensor.add(1)
def send_tensor(queue, event, device, dtype):
t = torch.ones(5, 5, device=device, dtype=dtype)
queue.put(t)
queue.put(t)
event.wait()
def send_and_delete_tensors(queue, event, device, dtype, count, size=5):
for i in range(count):
t = torch.full([size], i, device=device, dtype=dtype)
queue.put(t)
del t
event.wait()
def receive_and_send_sum(queue, out_queue, event, device, dtype, count, size=5):
s = torch.full([size], 0, device=device, dtype=dtype)
for i in range(count):
t = queue.get()
s += t
out_queue.put(s)
event.wait()
def receive_and_send(queue, out_queue, event, count):
for i in range(count):
t = queue.get()
out_queue.put(t.clone())
event.wait()
def sum_tensors(inq, outq):
with torch.cuda.device(1):
tensors = inq.get()
for tensor in tensors:
outq.put((tensor.sum().item(), tensor.get_device(),
tensor.numel(), tensor.storage().size()))
def queue_get_exception(inqueue, outqueue):
os.close(2) # hide expected error message
try:
torch.zeros(5, 5).cuda()
except Exception as e:
outqueue.put(e)
else:
outqueue.put('no exception')
# Multiply by two in a separate stream
def cuda_multiply_two(queue, ready, done):
ready.set()
with torch.cuda.stream(torch.cuda.Stream()):
cuda_event, tensor = queue.get()
cuda_event.wait()
tensor.mul_(2)
cuda_event.record()
done.set()
del cuda_event
def requires_grad_variable_sharing(queue, ready):
var = queue.get()
ready.set()
queue.put(var.requires_grad)
def integer_parameter_serialization(iparam):
iparam + 1
def autograd_sharing(queue, ready, master_modified, device, is_parameter):
var = queue.get()
ready.set()
master_modified.wait()
expected_var = torch.arange(1., 26, device=device).view(5, 5)
expected_var[0, 0] = 1000
is_ok = var.data.equal(expected_var)
var.data[:] = torch.ones(5, 5, device=device)
is_ok &= var.grad is None
is_ok &= not var._backward_hooks
if is_parameter:
is_ok &= type(var) == Parameter
else:
is_ok &= type(var) == torch.Tensor
var._grad = torch.ones(5, 5, device=device)
queue.put(is_ok)
def mixed_type_producer(queue, event):
for _ in range(10):
float_tensor = torch.ones(2, 2).float().cuda()
byte_tensor = torch.zeros(2, 2).byte().cuda()
queue.put(float_tensor)
queue.put(byte_tensor)
event.wait()
event.clear()
def simple_autograd_function(a=1):
torch.rand(3).requires_grad_(True).mean().backward()
return a ** 2
@contextlib.contextmanager
def fs_sharing():
prev_strategy = mp.get_sharing_strategy()
mp.set_sharing_strategy('file_system')
try:
yield
finally:
mp.set_sharing_strategy(prev_strategy)
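# --- Editor's note: illustrative sketch, not part of the original test file. ---
# fs_sharing() above temporarily switches torch.multiprocessing to the
# 'file_system' sharing strategy (tensors travel through shared-memory backed
# files, e.g. under /dev/shm on Linux) and restores the previous strategy on
# exit. A minimal use of it, with an invented function name and nothing in the
# tests calling it:
def _example_fs_sharing_roundtrip():
    with fs_sharing():
        q = mp.Queue()
        t = torch.ones(3, 3)
        q.put(t)            # serialized via a shared-memory backed storage
        received = q.get()
        assert torch.equal(received, t)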
class leak_checker(object):
def __init__(self, test_case):
self.checked_pids = [os.getpid()]
self.test_case = test_case
def __enter__(self):
self.next_fds = self._get_next_fds(10)
return self
def __exit__(self, *args):
if torch.cuda.is_available():
torch.cuda.ipc_collect()
if args[0] is None:
# Check that the 10th available file-descriptor at the end of the
# test is no more than 4 higher than the 10th available at the
# start. This attempts to catch file descriptor leaks, but allows
# one-off initialization that may use up a file descriptor
# TODO: Disabled because this check is too flaky
# available_fds = self._get_next_fds(10)
# self.test_case.assertLessEqual(
# available_fds[-1] - self.next_fds[-1], 5)
self.test_case.assertFalse(self.has_shm_files())
return False
def check_pid(self, pid):
self.checked_pids.append(pid)
def _get_next_fds(self, n=1):
# dup uses the lowest-numbered unused descriptor for the new descriptor
fds = [os.dup(0) for i in range(n)]
for fd in fds:
os.close(fd)
return fds
def has_shm_files(self, wait=True):
if not HAS_SHM_FILES:
return False
result = self._has_shm_files()
if result and mp.get_sharing_strategy() == 'file_system' and wait:
time.sleep(0.5)
return self._has_shm_files()
return result
def _has_shm_files(self):
gc.collect()
names = ['torch_' + str(pid) for pid in self.checked_pids]
for filename in os.listdir('/dev/shm'):
for name in names:
if filename.startswith(name):
return True
return False
@unittest.skipIf(TEST_WITH_TSAN, "TSAN is not fork-safe since we're forking in a multi-threaded environment")
class TestMultiprocessing(TestCase):
def tearDown(self):
# This will keep tests isolated from each other
if torch.cuda.is_available():
torch.cuda.ipc_collect()
def _test_sharing(self, ctx=mp, device='cpu', dtype=torch.float, repeat=1):
def test_fill():
x = torch.zeros(5, 5).to(device, dtype)
q = ctx.Queue()
e = ctx.Event()
data = [x, x[:, 1]]
q.put(data)
p = ctx.Process(target=simple_fill, args=(q, e))
p.daemon = True
lc.check_pid(p.pid)
p.start()
e.wait(10)
self.assertTrue(e.is_set())
self.assertTrue(data[0].eq(4).all())
self.assertTrue(data[1].eq(4).all())
p.join(100)
self.assertFalse(p.is_alive())
def test_receive():
q = ctx.Queue()
e = ctx.Event()
p = ctx.Process(target=send_tensor, args=(q, e, device, dtype))
p.daemon = True
lc.check_pid(p.pid)
p.start()
t1 = q.get()
t2 = q.get()
self.assertTrue(t1.eq(1).all())
s1 = t1.storage()
s2 = t2.storage()
self.assertEqual(type(s1), type(s2))
self.assertEqual(s1.data_ptr(), s2.data_ptr())
self.assertEqual(s1, s2)
# We need to delete these tensors to allow the producer (child process)
# to collect them properly
del t1, t2
e.set()
p.join(100)
self.assertFalse(p.is_alive())
with leak_checker(self) as lc:
for _ in range(repeat):
test_fill()
test_receive()
def _test_preserve_sharing(self, ctx=mp, repeat=1):
def do_test():
x = torch.randn(5, 5)
data = [x.storage(), x, x[2], x[:, 1]]
q = ctx.Queue()
q.put(data)
new_data = q.get(timeout=1)
self.assertEqual(new_data, data, atol=0, rtol=0)
storage_cdata = data[0]._cdata
self.assertEqual(new_data[0]._cdata, storage_cdata)
for t in new_data[1:]:
self.assertEqual(t.storage()._cdata, storage_cdata)
with leak_checker(self):
for _ in range(repeat):
do_test()
def _test_pool(self, ctx=mp, repeat=1):
def do_test():
p = ctx.Pool(2)
for proc in p._pool:
lc.check_pid(proc.pid)
buffers = [torch.zeros(2, 2) for i in range(4)]
results = p.map(simple_pool_fill, buffers, 1)
self.assertEqual(len(results), len(buffers))
for r in results:
self.assertEqual(r, torch.ones(2, 2) * 5, atol=0, rtol=0)
for b in buffers:
self.assertEqual(b, torch.ones(2, 2) * 4, atol=0, rtol=0)
p.close()
p.join()
with leak_checker(self) as lc:
for _ in range(repeat):
do_test()
@unittest.skipIf(platform == 'darwin', "file descriptor strategy is not supported on macOS")
@unittest.skipIf(TEST_WITH_ASAN,
"seems to hang with ASAN, see https://github.com/pytorch/pytorch/issues/5326")
def test_fd_sharing(self):
self._test_sharing(repeat=TEST_REPEATS)
@unittest.skipIf(platform == 'darwin', "file descriptor strategy is not supported on macOS")
def test_fd_preserve_sharing(self):
self._test_preserve_sharing(repeat=TEST_REPEATS)
@unittest.skipIf(platform == 'darwin', "file descriptor strategy is not supported on macOS")
def test_fd_pool(self):
self._test_pool(repeat=TEST_REPEATS)
@unittest.skipIf(TEST_WITH_ASAN,
"seems to hang with ASAN, see https://github.com/pytorch/pytorch/issues/5326")
def test_fs_sharing(self):
with fs_sharing():
self._test_sharing(repeat=TEST_REPEATS)
def test_fs_preserve_sharing(self):
with fs_sharing():
self._test_preserve_sharing(repeat=TEST_REPEATS)
def test_fs_pool(self):
with fs_sharing():
self._test_pool(repeat=TEST_REPEATS)
@unittest.skipIf(not HAS_SHM_FILES, "don't know how to check if shm files exist")
def test_fs(self):
def queue_put():
x = torch.DoubleStorage(4)
q = mp.Queue()
self.assertFalse(lc.has_shm_files())
q.put(x)
time.sleep(0.05) # queue serializes asynchronously
self.assertTrue(lc.has_shm_files(wait=False))
q.get()
with fs_sharing(), leak_checker(self) as lc:
for _ in range(TEST_REPEATS):
queue_put()
def test_inherit_tensor(self):
t = torch.zeros(5, 5)
p = SubProcess(t.share_memory_())
p.start()
p.join(2)
if p.exitcode is None:
print("test_inherit_tensor: SubProcess too slow")
else:
self.assertEqual(t, torch.ones(5, 5) * 3, atol=0, rtol=0)
@unittest.skipIf(IS_WINDOWS, "Test needs to use fork multiprocessing")
def test_autograd_errors(self):
ctx = mp.get_context('fork')
simple_autograd_function()
# Autograd only uses thread when GPUs are involved
if torch.cuda.is_available():
with self.assertRaisesRegex(RuntimeError, r'Unable to handle autograd'):
with ctx.Pool(3) as pool:
pool.map(simple_autograd_function, [1, 2, 3])
else:
with ctx.Pool(3) as pool:
pool.map(simple_autograd_function, [1, 2, 3])
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Test needs to use spawn multiprocessing")
def test_autograd_fine_with_spawn(self):
ctx = mp.get_context('spawn')
simple_autograd_function()
with ctx.Pool(3) as pool:
pool.map(simple_autograd_function, [1, 2, 3])
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
@unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
def test_cuda_simple(self):
torch.cuda.FloatTensor([1]) # initialize CUDA outside of leak checker
self._test_sharing(mp.get_context('spawn'), 'cuda', torch.float)
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
@unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
def test_cuda_memory_allocation(self):
ctx = mp.get_context('spawn')
q = ctx.Queue()
e = ctx.Event()
p = ctx.Process(target=send_and_delete_tensors, args=(q, e, 'cuda', torch.int, 5))
p.start()
t = []
for _ in range(5):
t.append(q.get())
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(t[0], torch.full([5], 0.))
del t
e.set()
p.join(1)
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
@unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
def test_cuda_ipc_deadlock(self):
ctx = mp.get_context('spawn')
queue = ctx.Queue(1)
processes = dict(
a=ctx.Process(target=_test_cuda_ipc_deadlock_actor, args=(queue, 100)),
l=ctx.Process(target=_test_cuda_ipc_deadlock_learner, args=(queue, 100)))
for p in processes.values():
p.start()
for p in processes.values():
p.join(10)
for p in processes.values():
self.assertFalse(p.is_alive())
@slowTest
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
@unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
def test_cuda_send_many(self, name=None, size=5, count=100000):
ctx = mp.get_context('spawn')
q1 = ctx.Queue()
q2 = ctx.Queue()
q3 = ctx.Queue()
e1 = ctx.Event()
e2 = ctx.Event()
e3 = ctx.Event()
p1 = ctx.Process(target=send_and_delete_tensors, args=(q1, e1, 'cuda', torch.long, count, size))
p2 = ctx.Process(target=receive_and_send, args=(q1, q2, e2, count))
p3 = ctx.Process(target=receive_and_send_sum, args=(q2, q3, e3, 'cuda', torch.long, count, size))
p1.start()
p2.start()
p3.start()
result = q3.get()
self.assertEqual(result[0], int(count * (count - 1) / 2))
del result
e1.set()
e2.set()
e3.set()
p1.join(1)
p2.join(1)
p3.join(1)
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
@unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
@unittest.skipIf(not TEST_MULTIGPU, 'found only 1 GPU')
def test_cuda_small_tensors(self):
# Check multiple small tensors which will likely use the same
# underlying cached allocation
ctx = mp.get_context('spawn')
tensors = []
for i in range(5):
device = i % 2
tensors += [torch.arange(i * 5., (i + 1) * 5).cuda(device)]
inq = ctx.Queue()
outq = ctx.Queue()
inq.put(tensors)
p = ctx.Process(target=sum_tensors, args=(inq, outq))
p.start()
results = []
for _ in range(5):
results.append(outq.get())
p.join()
for i, _tensor in enumerate(tensors):
v, device, tensor_size, storage_size = results[i]
self.assertEqual(v, torch.arange(i * 5., (i + 1) * 5).sum())
self.assertEqual(device, i % 2)
self.assertEqual(tensor_size, 5)
# You might think this should be the case, but it's not! After
# data from the CUDA caching allocator goes through IPC, the
# size of the storage is the size of the *cached cudaMalloc for
# the entire memory block* of the storage, not just the storage.
# See Note [CUDA IPC and the caching allocator] for more info
#
# self.assertEqual(storage_size, 5)
# Collect current process (producer) files, make sure nothing holds
# ref to the sent tensors
del _tensor
del tensors
# We need to collect, as the CUDA MP implementation holds one shared
# memory 'file' for performance reasons
torch.cuda.ipc_collect()
@unittest.skipIf(IS_WINDOWS, 'not applicable to Windows (only fails with fork)')
@unittest.skipIf(not torch.cuda.is_available(), 'CUDA not available')
def test_cuda_bad_call(self):
# Initialize CUDA
t = torch.zeros(5, 5).cuda().cpu()
inq = mp.Queue()
outq = mp.Queue()
p = mp.Process(target=queue_get_exception, args=(inq, outq))
p.start()
inq.put(t)
p.join()
self.assertIsInstance(outq.get(), RuntimeError)
@unittest.skipIf(IS_WINDOWS, 'not applicable to Windows (only fails with fork)')
@unittest.skipIf(not torch.cuda.is_available(), 'CUDA not available')
def test_wrong_cuda_fork(self):
stderr = TestCase.runWithPytorchAPIUsageStderr("""\
import torch
from torch.multiprocessing import Process
def run(rank):
torch.cuda.set_device(rank)
if __name__ == "__main__":
size = 2
processes = []
for rank in range(size):
# it would work fine without the line below
x = torch.rand(20, 2).cuda()
p = Process(target=run, args=(rank,))
p.start()
processes.append(p)
for p in processes:
p.join()
""")
self.assertRegex(stderr, "Cannot re-initialize CUDA in forked subprocess.")
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
@unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
def test_event(self):
ctx = mp.get_context('spawn')
queue = ctx.Queue()
ready = ctx.Event()
done = ctx.Event()
p = ctx.Process(target=cuda_multiply_two, args=(queue, ready, done))
p.start()
ready.wait()
with torch.cuda.stream(torch.cuda.Stream()):
tensor = torch.cuda.FloatTensor([1, 1, 1, 1])
# Use a sleep kernel to test events. Without the event, the
# multiply happens before the add.
event = torch.cuda.Event(interprocess=True)
torch.cuda._sleep(20000000) # about 30 ms
tensor.add_(1)
event.record()
queue.put((event, tensor))
done.wait() # must wait until subprocess records event
event.synchronize()
self.assertEqual(list(tensor), [4, 4, 4, 4])
p.join()
@staticmethod
def _test_event_multiprocess_child(event, p2c, c2p):
c2p.put(0) # notify parent child is ready
p2c.get() # wait for record in parent
event.synchronize()
c2p.put(1) # notify parent synchronization is done
@unittest.skip("Skipped as this test fails on ROCm")
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
@unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
@unittest.skipIf(TEST_WITH_ROCM, 'Skip the test for ROCm')
def test_event_multiprocess(self):
event = torch.cuda.Event(enable_timing=False, interprocess=True)
self.assertTrue(event.query())
ctx = mp.get_context('spawn')
p2c = ctx.SimpleQueue()
c2p = ctx.SimpleQueue()
p = ctx.Process(
target=TestMultiprocessing._test_event_multiprocess_child,
args=(event, p2c, c2p))
p.start()
c2p.get() # wait until the child process is ready
torch.cuda._sleep(50000000) # spin for about 50 ms
event.record()
p2c.put(0) # notify child event is recorded
self.assertFalse(event.query())
c2p.get() # wait for synchronization in child
self.assertTrue(event.query())
p.join()
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
@unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
@unittest.skipIf(not TEST_MULTIGPU, 'found only 1 GPU')
def test_event_handle_multi_gpu(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
with torch.cuda.device(d0):
e0 = torch.cuda.Event(enable_timing=False, interprocess=True)
with torch.cuda.device(d1):
# create handle on different device from un-recorded event
e0.ipc_handle()
with torch.cuda.device(d0):
e1 = torch.cuda.Event(enable_timing=False, interprocess=True)
stream = torch.cuda.Stream()
torch.cuda._sleep(50000000) # spin for about 50 ms
e1.record(stream)
with torch.cuda.device(d1):
# create handle on different device from recorded event
e1.ipc_handle()
@staticmethod
def _test_event_handle_importer_consumer(handle, p2c, c2p):
e1 = torch.cuda.Event.from_ipc_handle(0, handle)
c2p.put(0) # notify parent child is ready
p2c.get() # wait for record in parent
e1.synchronize()
c2p.put(1) # notify parent that synchronization is done in the child
p2c.get() # wait for parent to finish before destructing child event
@unittest.skip("Skipped as this test fails on ROCm")
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
@unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
@unittest.skipIf(TEST_WITH_ROCM, 'Skip the test for ROCm')
def test_event_handle_importer(self):
e0 = torch.cuda.Event(enable_timing=False, interprocess=True)
self.assertTrue(e0.query())
ctx = mp.get_context('spawn')
p2c = ctx.SimpleQueue()
c2p = ctx.SimpleQueue()
p = ctx.Process(
target=TestMultiprocessing._test_event_handle_importer_consumer,
args=(e0.ipc_handle(), p2c, c2p))
p.start()
c2p.get() # wait for child to become ready
torch.cuda._sleep(50000000) # spin for about 50 ms
e0.record()
p2c.put(0) # notify child event is recorded
self.assertFalse(e0.query())
c2p.get() # wait for synchronization in child
self.assertTrue(e0.query())
p2c.put(1) # notify child that parent is done
p.join()
@staticmethod
def _test_event_handle_exporter_consumer(handle, p2c, c2p):
stream = torch.cuda.Stream()
with torch.cuda.stream(stream):
e1 = torch.cuda.Event.from_ipc_handle(
torch.cuda.current_device(), handle)
torch.cuda._sleep(50000000) # spin for about 50 ms
e1.record()
c2p.put(0)
# wait for parent process finished synchronization before
# destructing e1
p2c.get()
@unittest.skip("Skipped as this test fails on ROCm")
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
@unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
@unittest.skipIf(TEST_WITH_ROCM, 'Skip the test for ROCm')
def test_event_handle_exporter(self):
e0 = torch.cuda.Event(enable_timing=False, interprocess=True)
ctx = mp.get_context('spawn')
p2c = ctx.SimpleQueue()
c2p = ctx.SimpleQueue()
p = ctx.Process(
target=TestMultiprocessing._test_event_handle_exporter_consumer,
args=(e0.ipc_handle(), p2c, c2p))
p.start()
# wait until the event in the child process has been recorded
c2p.get()
self.assertFalse(e0.query())
e0.synchronize()
self.assertTrue(e0.query())
p2c.put(0)
p.join()
def _test_empty_tensor_sharing(self, dtype, device):
q = mp.Queue()
empty = torch.tensor([], dtype=dtype, device=device)
q.put(empty)
out = q.get(timeout=1)
self.assertEqual(out, empty)
def test_empty_tensor_sharing(self):
self._test_empty_tensor_sharing(torch.float32, torch.device('cpu'))
self._test_empty_tensor_sharing(torch.int64, torch.device('cpu'))
@unittest.skipIf(not torch.cuda.is_available(), 'CUDA not available')
def test_empty_tensor_sharing_cuda(self):
self._test_empty_tensor_sharing(torch.float32, torch.device('cuda'))
self._test_empty_tensor_sharing(torch.int64, torch.device('cuda'))
def _test_autograd_sharing(self, var, ctx=mp, is_parameter=False):
device = 'cuda' if var.is_cuda else 'cpu'
ready = ctx.Event()
master_modified = ctx.Event()
queue = ctx.Queue()
p = ctx.Process(target=autograd_sharing, args=(queue, ready, master_modified, device, is_parameter))
p.daemon = True
p.start()
# This would cause an error if we tried to serialize the hooks,
# because it's a closure and pickle doesn't support closures.
@torch.utils.hooks.unserializable_hook
def hook(*unused):
pass
if var.requires_grad:
var.register_hook(hook)
var._grad = torch.zeros(5, 5, device=device)
queue.put(var)
ready.wait()
var.data[0, 0] = 1000
var.grad.data[:] = torch.ones(5, 5, device=device) * 4
master_modified.set()
worker_ok = queue.get()
self.assertTrue(worker_ok)
self.assertEqual(var.data, torch.ones(5, 5, device=device))
self.assertEqual(var.grad.data, torch.ones(5, 5, device=device) * 4)
p.join(100)
self.assertFalse(p.is_alive())
# Check sharing a cudaMalloc allocation with different types of storage.
# (Issue #11422)
def _test_mixed_types_cuda_sharing(self, ctx=mp):
all_ones = torch.ones(2, 2).float()
all_zeros = torch.zeros(2, 2).byte()
queue = ctx.Queue()
event = ctx.Event()
p = ctx.Process(target=mixed_type_producer, args=(queue, event))
p.start()
for _ in range(10):
float_tensor = queue.get()
byte_tensor = queue.get()
self.assertEqual(float_tensor, all_ones)
self.assertEqual(byte_tensor, all_zeros)
del float_tensor, byte_tensor
event.set()
time.sleep(5)
p.join()
def test_variable_sharing(self):
for requires_grad in [True, False]:
var = torch.arange(1., 26).view(5, 5).requires_grad_(requires_grad)
self._test_autograd_sharing(var)
# See https://github.com/pytorch/pytorch/issues/14997
@unittest.skipIf(TEST_WITH_ASAN,
"non-deterministically hangs with ASAN")
def test_leaf_variable_sharing(self):
devices = ['cpu']
if torch.cuda.is_available() and not NO_MULTIPROCESSING_SPAWN and TEST_CUDA_IPC:
devices.append('cuda')
for device in devices:
for requires_grad in [True, False]:
var = torch.arange(1., 26, device=device).view(5, 5).requires_grad_(requires_grad)
self.assertTrue(var.is_leaf)
ctx = mp.get_context('spawn') if device == 'cuda' else mp
ready = ctx.Event()
queue = ctx.Queue()
p = ctx.Process(target=requires_grad_variable_sharing, args=(queue, ready))
p.daemon = True
p.start()
queue.put(var)
ready.wait()
worker_requires_grad = queue.get()
self.assertTrue(worker_requires_grad == requires_grad)
def test_non_leaf_variable_sharing(self):
devices = ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda']
for device in devices:
var0 = torch.arange(1., 26, device=device).view(5, 5).requires_grad_(True)
var = var0 * 2
# Don't use a regular Queue; it uses a background thread (which
# means we can't catch the exceptions)
queue = mp.SimpleQueue()
self.assertRaisesRegex(RuntimeError, r'requires_grad', lambda: queue.put(var))
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
@unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
def test_cuda_variable_sharing(self):
for requires_grad in [True, False]:
var = torch.arange(1., 26, device='cuda').view(5, 5).requires_grad_(requires_grad)
self._test_autograd_sharing(var, mp.get_context('spawn'))
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
@unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
def test_mixed_types_cuda_sharing(self):
self._test_mixed_types_cuda_sharing(mp.get_context('spawn'))
def test_parameter_sharing(self):
param = Parameter(torch.arange(1., 26).view(5, 5))
self._test_autograd_sharing(param, is_parameter=True)
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
@unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
def test_cuda_parameter_sharing(self):
param = Parameter(torch.arange(1., 26, device='cuda').view(5, 5))
self._test_autograd_sharing(param, mp.get_context('spawn'), is_parameter=True)
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
def test_integer_parameter_serialization_cpu(self):
self._test_integer_parameter_serialization(device='cpu')
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
@unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
def test_integer_parameter_serialization_cuda(self):
self._test_integer_parameter_serialization(device='cuda')
def _test_integer_parameter_serialization(self, device):
param = torch.nn.Parameter(
torch.tensor(0, dtype=torch.int64, device=device),
requires_grad=False
)
ctx = mp.get_context('spawn')
p = ctx.Process(target=integer_parameter_serialization, args=(param,))
p.start()
p.join()
self.assertEqual(
0, p.exitcode,
msg=f'Failed to serialize successfully for "{device}" device!'
)
def test_empty_shared(self):
t = torch.tensor([])
t.share_memory_()
def _test_is_shared(self):
t = torch.randn(5, 5)
self.assertFalse(t.is_shared())
t.share_memory_()
self.assertTrue(t.is_shared())
@unittest.skipIf(platform == 'darwin', "file descriptor strategy is not supported on macOS")
def test_is_shared(self):
self._test_is_shared()
def test_fs_is_shared(self):
with fs_sharing():
self._test_is_shared()
@unittest.skipIf(not torch.cuda.is_available(), 'CUDA not available')
def test_is_shared_cuda(self):
t = torch.randn(5, 5).cuda()
self.assertTrue(t.is_shared())
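# --- Editor's note: illustrative sketch, not part of the original test file. ---
# Most tests above follow the same producer/consumer shape: a module-level
# worker puts a tensor on a torch.multiprocessing queue and the other side
# reads it back, with the storage shared rather than copied. A stripped-down
# version of that pattern (function names invented for illustration, nothing
# here is called by the tests):
def _example_producer(queue):
    queue.put(torch.full((2, 2), 7.0))

def _example_share_tensor_with_child():
    ctx = mp.get_context('spawn')   # spawn is also what the CUDA tests require
    q = ctx.Queue()
    p = ctx.Process(target=_example_producer, args=(q,))
    p.start()
    t = q.get()                     # backed by shared memory, not a plain copy
    p.join()
    return t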
if __name__ == '__main__':
run_tests()
|
views.py
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import ast
import copy
import functools
import itertools
import json
import logging
import math
import os
import re
import socket
import shutil
import stat
import tarfile
import tempfile
import time
import threading
import traceback
import yaml
from collections import defaultdict
from datetime import timedelta, datetime
from urllib.parse import unquote
from pathlib import Path
import six
from six.moves.urllib.parse import quote
import markdown
import pendulum
import sqlalchemy as sqla
from croniter import CroniterBadCronError, CroniterBadDateError, CroniterNotAlphaError, croniter
from flask import (
Markup, Response, escape, flash, jsonify, make_response, redirect, render_template, request,
session as flask_session, url_for, g, send_file
)
from flask._compat import PY2
from flask_appbuilder import BaseView, ModelView, expose, has_access, permission_name
from flask_appbuilder.actions import action
from flask_appbuilder.api import BaseApi
# from flask_appbuilder.security.decorators import protect
from flask_appbuilder.models.sqla.filters import BaseFilter
from flask_babel import lazy_gettext
import lazy_object_proxy
from jinja2.utils import htmlsafe_json_dumps # type: ignore
from pygments import highlight, lexers
from pygments.formatters import HtmlFormatter
from sqlalchemy import and_, desc, func, or_, union_all # noqa
from sqlalchemy.orm import joinedload
from wtforms import SelectField, validators
import airflow
from airflow.configuration import conf
from airflow import models, jobs
from airflow import settings, configuration
from airflow.api.common.experimental.mark_tasks import (set_dag_run_state_to_success,
set_dag_run_state_to_failed)
from airflow.jobs.backfill_job import BackfillJob
from airflow.models import Connection, DagModel, DagRun, DagTag, Log, SlaMiss, TaskFail, XCom, errors
from airflow.exceptions import AirflowException
from airflow.models.dagcode import DagCode
from airflow.ti_deps.dep_context import RUNNING_DEPS, SCHEDULER_QUEUED_DEPS, DepContext
from airflow.utils import timezone
from airflow.settings import STORE_SERIALIZED_DAGS
from airflow.configuration import AIRFLOW_HOME
from airflow.utils.dates import infer_time_unit, scale_time_units
from airflow.utils.db import provide_session, create_session
from airflow.utils.helpers import alchemy_to_dict, render_log_filename
from airflow.utils.state import State
from airflow._vendor import nvd3
from airflow.www_rbac import utils as wwwutils
from airflow.www_rbac.app import app, appbuilder, csrf
from airflow.www_rbac.decorators import action_logging, gzipped, has_dag_access
from airflow.www_rbac.forms import (DateTimeForm, DateTimeWithNumRunsForm,
DateTimeWithNumRunsWithDagRunsForm,
DagRunForm, ConnectionForm)
from airflow.www_rbac.mixins import GitIntegrationMixin
from airflow.www_rbac.widgets import AirflowModelListWidget
from airflow.www_rbac.utils import unpause_dag, move_to_hdfs
PAGE_SIZE = conf.getint('webserver', 'page_size')
FILTER_TAGS_COOKIE = 'tags_filter'
dagbag = None
def _parse_dags(update_DagModel=False):
global dagbag
if os.environ.get('SKIP_DAGS_PARSING') != 'True':
dagbag = models.DagBag(settings.DAGS_FOLDER, store_serialized_dags=STORE_SERIALIZED_DAGS)
else:
dagbag = models.DagBag(os.devnull, include_examples=False)
if update_DagModel:
# Check whether this works across multiple gunicorn workers or not.
for dag in dagbag.dags.values():
dag.sync_to_db()
_parse_dags()
def get_date_time_num_runs_dag_runs_form_data(request, session, dag):
dttm = request.args.get('execution_date')
if dttm:
dttm = pendulum.parse(dttm)
else:
dttm = dag.latest_execution_date or timezone.utcnow()
base_date = request.args.get('base_date')
if base_date:
base_date = timezone.parse(base_date)
else:
# The DateTimeField widget truncates milliseconds and would lose
# the first dag run. Round up to the next second (see the note after this function).
base_date = (dttm + timedelta(seconds=1)).replace(microsecond=0)
default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else default_dag_run
DR = models.DagRun
drs = (
session.query(DR)
.filter(
DR.dag_id == dag.dag_id,
DR.execution_date <= base_date)
.order_by(desc(DR.execution_date))
.limit(num_runs)
.all()
)
dr_choices = []
dr_state = None
for dr in drs:
dr_choices.append((dr.execution_date.isoformat(), dr.run_id))
if dttm == dr.execution_date:
dr_state = dr.state
# Happens if base_date was changed and the selected dag run is not in the result
if not dr_state and drs:
dr = drs[0]
dttm = dr.execution_date
dr_state = dr.state
return {
'dttm': dttm,
'base_date': base_date,
'num_runs': num_runs,
'execution_date': dttm.isoformat(),
'dr_choices': dr_choices,
'dr_state': dr_state,
}
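# --- Editor's note: illustrative worked example, not part of the original module. ---
# The default base_date above rounds dttm *up* to the next whole second because
# the DateTimeField widget drops milliseconds: if base_date were simply dttm, it
# would come back from the form truncated to the second below dttm, and the dag
# run at dttm would then fail the `DR.execution_date <= base_date` filter.
# For example (values invented for illustration):
#
#     dttm = pendulum.parse('2020-01-01T00:00:00.250+00:00')
#     base_date = (dttm + timedelta(seconds=1)).replace(microsecond=0)
#     # base_date == 2020-01-01 00:00:01+00:00, so the 00:00:00.250 run is kept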
######################################################################################
# BaseViews
######################################################################################
@app.errorhandler(404)
def circles(error):
return render_template(
'airflow/circles.html', hostname=socket.getfqdn() if conf.getboolean(
'webserver',
'EXPOSE_HOSTNAME',
fallback=True) else 'redact'), 404
@app.errorhandler(500)
def show_traceback(error):
from airflow.utils import asciiart as ascii_
return render_template(
'airflow/traceback.html',
hostname=socket.getfqdn() if conf.getboolean(
'webserver',
'EXPOSE_HOSTNAME',
fallback=True) else 'redact',
nukular=ascii_.nukular,
info=traceback.format_exc() if conf.getboolean(
'webserver',
'EXPOSE_STACKTRACE',
fallback=True) else 'Error! Please contact server admin'), 500
class AirflowBaseView(BaseView):
route_base = ''
@provide_session
def audit_logging(event_name, extra, source_ip, session=None):
if g.user.is_anonymous:
user = 'anonymous'
else:
user = g.user.username
log = Log(
event=event_name,
task_instance=None,
owner=user,
extra=extra,
task_id=None,
dag_id=None,
source_ip=source_ip)
session.add(log)
def convert_size(size_bytes):
# TODO: this method is buggy, change it.
# Convert a size in bytes into a human-readable string (used for file sizes).
if size_bytes == 0:
return "0B"
size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
i = int(math.floor(math.log(size_bytes, 1024)))
p = math.pow(1024, i)
s = round(size_bytes / p, 2)
return "%s %s" % (s, size_name[i])
def get_details(self, dir_path, file_extension):
''' Takes the path and file extension, returns size and last modified time of files
inside the path.
'''
# NOTE: This method uses `os.walk`; we may want to use `os.listdir()` instead (see the sketch below).
file_data = {}
for r, d, f in os.walk(dir_path):
for file_name in f:
if file_name.endswith(file_extension):
filePath = os.path.join(dir_path, file_name)
if(os.path.exists(filePath)):
fileStatsObj = os.stat(filePath)
modificationTime = time.ctime(fileStatsObj[stat.ST_MTIME]) # get last modified time
size_bytes = os.stat(filePath).st_size
size = AirflowBaseView.convert_size(size_bytes)
temp_dict = {'time': modificationTime.split(' ', 1)[1], 'size': size}
file_data[file_name] = temp_dict
return file_data
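# --- Editor's note: illustrative sketch, not part of the original module. ---
# As the NOTE in get_details() says, os.walk is more than is needed here:
# every file name found is joined back onto dir_path, so in practice only
# files sitting directly inside dir_path are reported. A non-recursive
# variant built on os.listdir() could look like:
#
#     for file_name in os.listdir(dir_path):
#         if file_name.endswith(file_extension):
#             file_path = os.path.join(dir_path, file_name)
#             ...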
def get_len_jar(file_data):
'''To get the number of jar files'''
len_jar = 0
for file in file_data:
if file.endswith(".jar"):
len_jar = len_jar + 1
return len_jar
def get_len_py(file_data):
'''To get the number of py/egg/zip files'''
len_py = 0
for file in file_data:
if file.endswith(".py") or file.endswith(".egg") or file.endswith(".zip"):
len_py = len_py + 1
return len_py
# Make our macros available to our UI templates too.
extra_args = {
'macros': airflow.macros,
}
def render_template(self, *args, **kwargs):
return super(AirflowBaseView, self).render_template(
*args,
# Cache this at most once per request, not for the lifetime of the view instance
scheduler_job=lazy_object_proxy.Proxy(jobs.SchedulerJob.most_recent_job),
**kwargs
)
class Airflow(AirflowBaseView):
@expose('/health')
def health(self):
"""
An endpoint helping check the health status of the Airflow instance,
including metadatabase and scheduler.
"""
payload = {
'metadatabase': {'status': 'unhealthy'}
}
latest_scheduler_heartbeat = None
scheduler_status = 'unhealthy'
payload['metadatabase'] = {'status': 'healthy'}
try:
scheduler_job = jobs.SchedulerJob.most_recent_job()
if scheduler_job:
latest_scheduler_heartbeat = scheduler_job.latest_heartbeat.isoformat()
if scheduler_job.is_alive():
scheduler_status = 'healthy'
except Exception:
payload['metadatabase']['status'] = 'unhealthy'
payload['scheduler'] = {'status': scheduler_status,
'latest_scheduler_heartbeat': latest_scheduler_heartbeat}
return wwwutils.json_response(payload)
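# --- Editor's note: illustrative sketch, not part of the original module. ---
# A healthy response from the /health endpoint above looks like
# {"metadatabase": {"status": "healthy"},
#  "scheduler": {"status": "healthy", "latest_scheduler_heartbeat": "..."}},
# with "unhealthy" reported when the metadata DB query fails or no live
# SchedulerJob heartbeat is found. A monitoring probe (hypothetical URL;
# host, port and base path depend on the deployment) could poll it like:
#
#     payload = requests.get('http://localhost:8080/health').json()
#     assert payload['scheduler']['status'] == 'healthy'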
@expose('/home')
@has_access
@provide_session
def index(self, session=None):
DM = models.DagModel
hide_paused_dags_by_default = conf.getboolean('webserver',
'hide_paused_dags_by_default')
show_paused_arg = request.args.get('showPaused', 'None')
default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else default_dag_run
def get_int_arg(value, default=0):
try:
return int(value)
except ValueError:
return default
arg_current_page = request.args.get('page', '0')
arg_search_query = request.args.get('search', None)
arg_tags_filter = request.args.getlist('tags', None)
if request.args.get('reset_tags') is not None:
flask_session[FILTER_TAGS_COOKIE] = None
arg_tags_filter = None
else:
cookie_val = flask_session.get(FILTER_TAGS_COOKIE)
if arg_tags_filter:
flask_session[FILTER_TAGS_COOKIE] = ','.join(arg_tags_filter)
elif cookie_val:
arg_tags_filter = cookie_val.split(',')
dags_per_page = PAGE_SIZE
current_page = get_int_arg(arg_current_page, default=0)
if show_paused_arg.strip().lower() == 'false':
hide_paused = True
elif show_paused_arg.strip().lower() == 'true':
hide_paused = False
else:
hide_paused = hide_paused_dags_by_default
# read orm_dags from the db
query = session.query(DM).filter(
~DM.is_subdag, DM.is_active
)
# optionally filter out "paused" dags
if hide_paused:
query = query.filter(~DM.is_paused)
if arg_search_query:
query = query.filter(
DagModel.dag_id.ilike('%' + arg_search_query + '%') |
DagModel.owners.ilike('%' + arg_search_query + '%')
)
import_errors = session.query(errors.ImportError).all()
for ie in import_errors:
flash(
"Broken DAG: [{ie.filename}] {ie.stacktrace}".format(ie=ie),
"dag_import_error")
from airflow.plugins_manager import import_errors as plugin_import_errors
for filename, stacktrace in plugin_import_errors.items():
flash(
"Broken plugin: [{filename}] {stacktrace}".format(
stacktrace=stacktrace,
filename=filename),
"error")
# Get all the dag ids the user can access
filter_dag_ids = appbuilder.sm.get_accessible_dag_ids()
if arg_tags_filter:
query = query.filter(DagModel.tags.any(DagTag.name.in_(arg_tags_filter)))
if 'all_dags' not in filter_dag_ids:
query = query.filter(DM.dag_id.in_(filter_dag_ids))
start = current_page * dags_per_page
end = start + dags_per_page
dags = query.order_by(DagModel.dag_id).options(
joinedload(DagModel.tags)).offset(start).limit(dags_per_page).all()
tags = []
dagtags = session.query(DagTag.name).distinct(DagTag.name).all()
tags = [
{"name": name, "selected": bool(arg_tags_filter and name in arg_tags_filter)}
for name, in dagtags
]
num_of_all_dags = query.count()
num_of_pages = int(math.ceil(num_of_all_dags / float(dags_per_page)))
user = None
if "COUTURE_WORKFLOW_USER" in os.environ:
user = os.environ['COUTURE_WORKFLOW_USER']
return self.render_template(
'airflow/dags.html',
dags=dags,
hide_paused=hide_paused,
current_page=current_page,
search_query=arg_search_query if arg_search_query else '',
page_size=dags_per_page,
num_of_pages=num_of_pages,
num_dag_from=min(start + 1, num_of_all_dags),
num_dag_to=min(end, num_of_all_dags),
num_of_all_dags=num_of_all_dags,
paging=wwwutils.generate_pages(current_page, num_of_pages,
search=arg_search_query,
showPaused=not hide_paused),
num_runs=num_runs,
tags=tags,
user=user)
@expose('/dag_stats', methods=['POST'])
@has_access
@provide_session
def dag_stats(self, session=None):
dr = models.DagRun
allowed_dag_ids = appbuilder.sm.get_accessible_dag_ids()
if 'all_dags' in allowed_dag_ids:
allowed_dag_ids = [dag_id for dag_id, in session.query(models.DagModel.dag_id)]
dag_state_stats = session.query(dr.dag_id, dr.state, sqla.func.count(dr.state))\
.group_by(dr.dag_id, dr.state)
# Filter by post parameters
selected_dag_ids = {
unquote(dag_id) for dag_id in request.form.getlist('dag_ids') if dag_id
}
if selected_dag_ids:
filter_dag_ids = selected_dag_ids.intersection(allowed_dag_ids)
else:
filter_dag_ids = allowed_dag_ids
if not filter_dag_ids:
return wwwutils.json_response({})
payload = {}
dag_state_stats = dag_state_stats.filter(dr.dag_id.in_(filter_dag_ids))
data = {}
for dag_id, state, count in dag_state_stats:
if dag_id not in data:
data[dag_id] = {}
data[dag_id][state] = count
for dag_id in filter_dag_ids:
payload[dag_id] = []
for state in State.dag_states:
count = data.get(dag_id, {}).get(state, 0)
payload[dag_id].append({
'state': state,
'count': count,
'dag_id': dag_id,
'color': State.color(state)
})
return wwwutils.json_response(payload)
@expose('/task_stats', methods=['POST'])
@has_access
@provide_session
def task_stats(self, session=None):
TI = models.TaskInstance
DagRun = models.DagRun
Dag = models.DagModel
allowed_dag_ids = set(appbuilder.sm.get_accessible_dag_ids())
if not allowed_dag_ids:
return wwwutils.json_response({})
if 'all_dags' in allowed_dag_ids:
allowed_dag_ids = {dag_id for dag_id, in session.query(models.DagModel.dag_id)}
# Filter by post parameters
selected_dag_ids = {
unquote(dag_id) for dag_id in request.form.getlist('dag_ids') if dag_id
}
if selected_dag_ids:
filter_dag_ids = selected_dag_ids.intersection(allowed_dag_ids)
else:
filter_dag_ids = allowed_dag_ids
LastDagRun = (
session.query(
DagRun.dag_id,
sqla.func.max(DagRun.execution_date).label('execution_date'))
.join(Dag, Dag.dag_id == DagRun.dag_id)
.filter(DagRun.state != State.RUNNING)
.filter(Dag.is_active == True) # noqa
.group_by(DagRun.dag_id)
)
RunningDagRun = (
session.query(DagRun.dag_id, DagRun.execution_date)
.join(Dag, Dag.dag_id == DagRun.dag_id)
.filter(DagRun.state == State.RUNNING)
.filter(Dag.is_active == True) # noqa
)
if selected_dag_ids:
LastDagRun = LastDagRun.filter(DagRun.dag_id.in_(filter_dag_ids))
RunningDagRun = RunningDagRun.filter(DagRun.dag_id.in_(filter_dag_ids))
LastDagRun = LastDagRun.subquery('last_dag_run')
RunningDagRun = RunningDagRun.subquery('running_dag_run')
# Select all task_instances from active dag_runs.
# If no dag_run is active, return task instances from most recent dag_run.
LastTI = (
session.query(TI.dag_id.label('dag_id'), TI.state.label('state'))
.join(LastDagRun,
and_(LastDagRun.c.dag_id == TI.dag_id,
LastDagRun.c.execution_date == TI.execution_date))
)
RunningTI = (
session.query(TI.dag_id.label('dag_id'), TI.state.label('state'))
.join(RunningDagRun,
and_(RunningDagRun.c.dag_id == TI.dag_id,
RunningDagRun.c.execution_date == TI.execution_date))
)
if selected_dag_ids:
LastTI = LastTI.filter(TI.dag_id.in_(filter_dag_ids))
RunningTI = RunningTI.filter(TI.dag_id.in_(filter_dag_ids))
UnionTI = union_all(LastTI, RunningTI).alias('union_ti')
qry = (
session.query(UnionTI.c.dag_id, UnionTI.c.state, sqla.func.count())
.group_by(UnionTI.c.dag_id, UnionTI.c.state)
)
data = {}
for dag_id, state, count in qry:
if dag_id not in data:
data[dag_id] = {}
data[dag_id][state] = count
payload = {}
for dag_id in filter_dag_ids:
payload[dag_id] = []
for state in State.task_states:
count = data.get(dag_id, {}).get(state, 0)
payload[dag_id].append({
'state': state,
'count': count,
'dag_id': dag_id,
'color': State.color(state)
})
return wwwutils.json_response(payload)
@expose('/last_dagruns', methods=['POST'])
@has_access
@provide_session
def last_dagruns(self, session=None):
DagRun = models.DagRun
allowed_dag_ids = appbuilder.sm.get_accessible_dag_ids()
if 'all_dags' in allowed_dag_ids:
allowed_dag_ids = [dag_id for dag_id, in session.query(models.DagModel.dag_id)]
# Filter by post parameters
selected_dag_ids = {
unquote(dag_id) for dag_id in request.form.getlist('dag_ids') if dag_id
}
if selected_dag_ids:
filter_dag_ids = selected_dag_ids.intersection(allowed_dag_ids)
else:
filter_dag_ids = allowed_dag_ids
if not filter_dag_ids:
return wwwutils.json_response({})
query = session.query(
DagRun.dag_id, sqla.func.max(DagRun.execution_date).label('last_run')
).group_by(DagRun.dag_id)
# Filter to only ask for accessible and selected dags
query = query.filter(DagRun.dag_id.in_(filter_dag_ids))
resp = {
r.dag_id.replace('.', '__dot__'): {
'dag_id': r.dag_id,
'last_run': r.last_run.isoformat(),
} for r in query
}
return wwwutils.json_response(resp)
@expose('/code')
@has_dag_access(can_dag_read=True)
@has_access
@provide_session
def code(self, session=None):
all_errors = ""
try:
dag_id = request.args.get('dag_id')
dag_orm = DagModel.get_dagmodel(dag_id, session=session)
code = DagCode.get_code_by_fileloc(dag_orm.fileloc)
html_code = highlight(
code, lexers.PythonLexer(), HtmlFormatter(linenos=True))
except Exception as e:
all_errors += (
"Exception encountered during " +
"dag_id retrieval/dag retrieval fallback/code highlighting:\n\n{}\n".format(e)
)
html_code = '<p>Failed to load file.</p><p>Details: {}</p>'.format(
escape(all_errors))
return self.render_template(
'airflow/dag_code.html', html_code=html_code, dag=dag_orm, title=dag_id,
root=request.args.get('root'),
demo_mode=conf.getboolean('webserver', 'demo_mode'),
wrapped=conf.getboolean('webserver', 'default_wrap'))
@expose('/dag_details')
@has_dag_access(can_dag_read=True)
@has_access
@provide_session
def dag_details(self, session=None):
dag_id = request.args.get('dag_id')
dag_orm = DagModel.get_dagmodel(dag_id, session=session)
# FIXME: items needed for this view should move to the database
dag = dag_orm.get_dag(STORE_SERIALIZED_DAGS)
title = "DAG details"
root = request.args.get('root', '')
TI = models.TaskInstance
states = (
session.query(TI.state, sqla.func.count(TI.dag_id))
.filter(TI.dag_id == dag_id)
.group_by(TI.state)
.all()
)
active_runs = models.DagRun.find(
dag_id=dag_id,
state=State.RUNNING,
external_trigger=False
)
return self.render_template(
'airflow/dag_details.html',
dag=dag, title=title, root=root, states=states, State=State, active_runs=active_runs)
@expose('/pickle_info')
@has_access
def pickle_info(self):
d = {}
filter_dag_ids = appbuilder.sm.get_accessible_dag_ids()
if not filter_dag_ids:
return wwwutils.json_response({})
dag_id = request.args.get('dag_id')
dags = [dagbag.dags.get(dag_id)] if dag_id else dagbag.dags.values()
for dag in dags:
if 'all_dags' in filter_dag_ids or dag.dag_id in filter_dag_ids:
if not dag.is_subdag:
d[dag.dag_id] = dag.pickle_info()
return wwwutils.json_response(d)
@expose('/rendered')
@has_dag_access(can_dag_read=True)
@has_access
@action_logging
@provide_session
def rendered(self, session=None):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = pendulum.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
root = request.args.get('root', '')
logging.info("Retrieving rendered templates.")
dag = dagbag.get_dag(dag_id)
task = copy.copy(dag.get_task(task_id))
ti = models.TaskInstance(task=task, execution_date=dttm)
try:
ti.get_rendered_template_fields()
except AirflowException as e:
msg = "Error rendering template: " + escape(e)
if not PY2:
if e.__cause__:
msg += Markup("<br/><br/>OriginalError: ") + escape(e.__cause__)
flash(msg, "error")
except Exception as e:
flash("Error rendering template: " + str(e), "error")
title = "Rendered Template"
html_dict = {}
for template_field in task.template_fields:
content = getattr(task, template_field)
if template_field in wwwutils.get_attr_renderer():
html_dict[template_field] = \
wwwutils.get_attr_renderer()[template_field](content)
else:
html_dict[template_field] = (
"<pre><code>" + str(content) + "</pre></code>")
return self.render_template(
'airflow/ti_code.html',
html_dict=html_dict,
dag=dag,
task_id=task_id,
execution_date=execution_date,
form=form,
root=root,
title=title)
@expose('/get_logs_with_metadata')
@has_dag_access(can_dag_read=True)
@has_access
@action_logging
@provide_session
def get_logs_with_metadata(self, session=None):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = pendulum.parse(execution_date)
if request.args.get('try_number') is not None:
try_number = int(request.args.get('try_number'))
else:
try_number = None
metadata = request.args.get('metadata')
metadata = json.loads(metadata)
response_format = request.args.get('format', 'json')
# metadata may be null
if not metadata:
metadata = {}
# Convert string datetime into actual datetime
try:
execution_date = timezone.parse(execution_date)
except ValueError:
error_message = (
'Given execution date, {}, could not be identified '
'as a date. Example date format: 2015-11-16T14:34:15+00:00'.format(
execution_date))
response = jsonify({'error': error_message})
response.status_code = 400
return response
logger = logging.getLogger('airflow.task')
task_log_reader = conf.get('core', 'task_log_reader')
handler = next((handler for handler in logger.handlers
if handler.name == task_log_reader), None)
ti = session.query(models.TaskInstance).filter(
models.TaskInstance.dag_id == dag_id,
models.TaskInstance.task_id == task_id,
models.TaskInstance.execution_date == dttm).first()
def _get_logs_with_metadata(try_number, metadata):
if ti is None:
logs = ["*** Task instance did not exist in the DB\n"]
metadata['end_of_log'] = True
else:
logs, metadatas = handler.read(ti, try_number, metadata=metadata)
metadata = metadatas[0]
return logs, metadata
try:
if ti is not None:
dag = dagbag.get_dag(dag_id)
ti.task = dag.get_task(ti.task_id)
if response_format == 'json':
logs, metadata = _get_logs_with_metadata(try_number, metadata)
message = logs[0] if try_number is not None else logs
return jsonify(message=message, metadata=metadata)
filename_template = conf.get('core', 'LOG_FILENAME_TEMPLATE')
attachment_filename = render_log_filename(
ti=ti,
try_number="all" if try_number is None else try_number,
filename_template=filename_template)
metadata['download_logs'] = True
def _generate_log_stream(try_number, metadata):
if try_number is None and ti is not None:
next_try = ti.next_try_number
try_numbers = list(range(1, next_try))
else:
try_numbers = [try_number]
for try_number in try_numbers:
metadata.pop('end_of_log', None)
metadata.pop('max_offset', None)
metadata.pop('offset', None)
while 'end_of_log' not in metadata or not metadata['end_of_log']:
logs, metadata = _get_logs_with_metadata(try_number, metadata)
yield "\n".join(logs) + "\n"
return Response(_generate_log_stream(try_number, metadata),
mimetype="text/plain",
headers={"Content-Disposition": "attachment; filename={}".format(
attachment_filename)})
except AttributeError as e:
error_message = ["Task log handler {} does not support read logs.\n{}\n"
.format(task_log_reader, str(e))]
metadata['end_of_log'] = True
return jsonify(message=error_message, error=True, metadata=metadata)
@expose('/log')
@has_dag_access(can_dag_read=True)
@has_access
@action_logging
@provide_session
def log(self, session=None):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = pendulum.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
ti = session.query(models.TaskInstance).filter(
models.TaskInstance.dag_id == dag_id,
models.TaskInstance.task_id == task_id,
models.TaskInstance.execution_date == dttm).first()
num_logs = 0
if ti is not None:
num_logs = ti.next_try_number - 1
if ti.state == State.UP_FOR_RESCHEDULE:
# Tasks in reschedule state decremented the try number
num_logs += 1
logs = [''] * num_logs
root = request.args.get('root', '')
return self.render_template(
'airflow/ti_log.html',
logs=logs, dag=dag, title="Log by attempts",
dag_id=dag.dag_id, task_id=task_id,
execution_date=execution_date, form=form,
root=root, wrapped=conf.getboolean('webserver', 'default_wrap'))
@expose('/elasticsearch')
@has_dag_access(can_dag_read=True)
@has_access
@action_logging
@provide_session
def elasticsearch(self, session=None):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
try_number = request.args.get('try_number', 1)
elasticsearch_frontend = conf.get('elasticsearch', 'frontend')
log_id_template = conf.get('elasticsearch', 'log_id_template')
log_id = log_id_template.format(
dag_id=dag_id, task_id=task_id,
execution_date=execution_date, try_number=try_number)
url = 'https://' + elasticsearch_frontend.format(log_id=quote(log_id))
return redirect(url)
@expose('/task')
@has_dag_access(can_dag_read=True)
@has_access
@action_logging
def task(self):
TI = models.TaskInstance
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
# Carrying execution_date through, even though it's irrelevant for
# this context
execution_date = request.args.get('execution_date')
dttm = pendulum.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
root = request.args.get('root', '')
dag = dagbag.get_dag(dag_id)
if not dag or task_id not in dag.task_ids:
flash(
"Task [{}.{}] doesn't seem to exist"
" at the moment".format(dag_id, task_id),
"error")
return redirect(url_for('Airflow.index'))
task = copy.copy(dag.get_task(task_id))
task.resolve_template_files()
ti = TI(task=task, execution_date=dttm)
ti.refresh_from_db()
ti_attrs = []
for attr_name in dir(ti):
if not attr_name.startswith('_'):
attr = getattr(ti, attr_name)
if type(attr) != type(self.task): # noqa
ti_attrs.append((attr_name, str(attr)))
task_attrs = []
for attr_name in dir(task):
if not attr_name.startswith('_'):
attr = getattr(task, attr_name)
if type(attr) != type(self.task) and \
attr_name not in wwwutils.get_attr_renderer(): # noqa
task_attrs.append((attr_name, str(attr)))
# Color coding the special attributes that are code
special_attrs_rendered = {}
for attr_name in wwwutils.get_attr_renderer():
if hasattr(task, attr_name):
source = getattr(task, attr_name)
special_attrs_rendered[attr_name] = \
wwwutils.get_attr_renderer()[attr_name](source)
no_failed_deps_result = [(
"Unknown",
"All dependencies are met but the task instance is not running. In most "
"cases this just means that the task will probably be scheduled soon "
"unless:<br/>\n- The scheduler is down or under heavy load<br/>\n{}\n"
"<br/>\nIf this task instance does not start soon please contact your "
"Airflow administrator for assistance.".format(
"- This task instance already ran and had it's state changed manually "
"(e.g. cleared in the UI)<br/>" if ti.state == State.NONE else ""))]
# Use the scheduler's context to figure out which dependencies are not met
dep_context = DepContext(SCHEDULER_QUEUED_DEPS)
failed_dep_reasons = [(dep.dep_name, dep.reason) for dep in
ti.get_failed_dep_statuses(
dep_context=dep_context)]
title = "Task Instance Details"
return self.render_template(
'airflow/task.html',
task_attrs=task_attrs,
ti_attrs=ti_attrs,
failed_dep_reasons=failed_dep_reasons or no_failed_deps_result,
task_id=task_id,
execution_date=execution_date,
special_attrs_rendered=special_attrs_rendered,
form=form,
root=root,
dag=dag, title=title)
@expose('/xcom')
@has_dag_access(can_dag_read=True)
@has_access
@action_logging
@provide_session
def xcom(self, session=None):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
# Carrying execution_date through, even though it's irrelevant for
# this context
execution_date = request.args.get('execution_date')
dttm = pendulum.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
root = request.args.get('root', '')
dm_db = models.DagModel
ti_db = models.TaskInstance
dag = session.query(dm_db).filter(dm_db.dag_id == dag_id).first()
        ti = session.query(ti_db).filter(ti_db.dag_id == dag_id, ti_db.task_id == task_id).first()
if not ti:
flash(
"Task [{}.{}] doesn't seem to exist"
" at the moment".format(dag_id, task_id),
"error")
return redirect(url_for('Airflow.index'))
xcomlist = session.query(XCom).filter(
XCom.dag_id == dag_id, XCom.task_id == task_id,
XCom.execution_date == dttm).all()
attributes = []
for xcom in xcomlist:
if not xcom.key.startswith('_'):
attributes.append((xcom.key, xcom.value))
title = "XCom"
return self.render_template(
'airflow/xcom.html',
attributes=attributes,
task_id=task_id,
execution_date=execution_date,
form=form,
root=root,
dag=dag, title=title)
@expose('/run', methods=['POST'])
@has_dag_access(can_dag_edit=True)
@has_access
@action_logging
def run(self):
dag_id = request.form.get('dag_id')
task_id = request.form.get('task_id')
origin = request.form.get('origin')
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
execution_date = request.form.get('execution_date')
execution_date = pendulum.parse(execution_date)
ignore_all_deps = request.form.get('ignore_all_deps') == "true"
ignore_task_deps = request.form.get('ignore_task_deps') == "true"
ignore_ti_state = request.form.get('ignore_ti_state') == "true"
from airflow.executors import get_default_executor
executor = get_default_executor()
valid_celery_config = False
valid_kubernetes_config = False
try:
from airflow.executors.celery_executor import CeleryExecutor
valid_celery_config = isinstance(executor, CeleryExecutor)
except ImportError:
pass
try:
from airflow.contrib.executors.kubernetes_executor import KubernetesExecutor
valid_kubernetes_config = isinstance(executor, KubernetesExecutor)
except ImportError:
pass
if not valid_celery_config and not valid_kubernetes_config:
flash("Only works with the Celery or Kubernetes executors, sorry", "error")
return redirect(origin)
ti = models.TaskInstance(task=task, execution_date=execution_date)
ti.refresh_from_db()
# Make sure the task instance can be run
dep_context = DepContext(
deps=RUNNING_DEPS,
ignore_all_deps=ignore_all_deps,
ignore_task_deps=ignore_task_deps,
ignore_ti_state=ignore_ti_state)
failed_deps = list(ti.get_failed_dep_statuses(dep_context=dep_context))
if failed_deps:
failed_deps_str = ", ".join(
["{}: {}".format(dep.dep_name, dep.reason) for dep in failed_deps])
flash("Could not queue task instance for execution, dependencies not met: "
"{}".format(failed_deps_str),
"error")
return redirect(origin)
executor.start()
executor.queue_task_instance(
ti,
ignore_all_deps=ignore_all_deps,
ignore_task_deps=ignore_task_deps,
ignore_ti_state=ignore_ti_state)
executor.heartbeat()
flash(
"Sent {} to the message queue, "
"it should start any moment now.".format(ti))
return redirect(origin)
@expose('/delete', methods=['POST'])
@has_dag_access(can_dag_edit=True)
@has_access
@action_logging
def delete(self):
from airflow.api.common.experimental import delete_dag
from airflow.exceptions import DagNotFound, DagFileExists
dag_id = request.values.get('dag_id')
origin = request.values.get('origin') or url_for('Airflow.index')
try:
delete_dag.delete_dag(dag_id)
except DagNotFound:
flash("DAG with id {} not found. Cannot delete".format(dag_id), 'error')
return redirect(request.referrer)
except DagFileExists:
flash("Dag id {} is still in DagBag. "
"Remove the DAG file first.".format(dag_id),
'error')
return redirect(request.referrer)
flash("Deleting DAG with id {}. May take a couple minutes to fully"
" disappear.".format(dag_id))
# Upon success return to origin.
return redirect(origin)
@expose('/trigger', methods=['POST', 'GET'])
@has_dag_access(can_dag_edit=True)
@has_access
@action_logging
@provide_session
def trigger(self, session=None):
dag_id = request.values.get('dag_id')
origin = request.values.get('origin') or url_for('Airflow.index')
if request.method == 'GET':
return self.render_template(
'airflow/trigger.html',
dag_id=dag_id,
origin=origin,
conf=''
)
dag = session.query(models.DagModel).filter(models.DagModel.dag_id == dag_id).first()
if not dag:
flash("Cannot find dag {}".format(dag_id))
return redirect(origin)
execution_date = timezone.utcnow()
run_id = "manual__{0}".format(execution_date.isoformat())
dr = DagRun.find(dag_id=dag_id, run_id=run_id)
if dr:
flash("This run_id {} already exists".format(run_id))
return redirect(origin)
run_conf = {}
conf = request.values.get('conf')
if conf:
try:
run_conf = json.loads(conf)
except ValueError:
flash("Invalid JSON configuration", "error")
return self.render_template(
'airflow/trigger.html',
dag_id=dag_id,
origin=origin,
conf=conf
)
dag.create_dagrun(
run_id=run_id,
execution_date=execution_date,
state=State.RUNNING,
conf=run_conf,
external_trigger=True
)
flash(
"Triggered {}, "
"it should start any moment now.".format(dag_id))
return redirect(origin)
def _clear_dag_tis(self, dag, start_date, end_date, origin,
recursive=False, confirmed=False, only_failed=False):
from airflow.exceptions import AirflowException
if confirmed:
count = dag.clear(
start_date=start_date,
end_date=end_date,
include_subdags=recursive,
include_parentdag=recursive,
only_failed=only_failed,
)
flash("{0} task instances have been cleared".format(count))
return redirect(origin)
try:
tis = dag.clear(
start_date=start_date,
end_date=end_date,
include_subdags=recursive,
include_parentdag=recursive,
only_failed=only_failed,
dry_run=True,
)
except AirflowException as ex:
flash(str(ex), 'error')
return redirect(origin)
if not tis:
flash("No task instances to clear", 'error')
response = redirect(origin)
else:
details = "\n".join([str(t) for t in tis])
response = self.render_template(
'airflow/confirm.html',
message=("Here's the list of task instances you are about "
"to clear:"),
details=details)
return response
@expose('/clear', methods=['POST'])
@has_dag_access(can_dag_edit=True)
@has_access
@action_logging
def clear(self):
dag_id = request.form.get('dag_id')
task_id = request.form.get('task_id')
origin = request.form.get('origin')
dag = dagbag.get_dag(dag_id)
execution_date = request.form.get('execution_date')
execution_date = pendulum.parse(execution_date)
confirmed = request.form.get('confirmed') == "true"
upstream = request.form.get('upstream') == "true"
downstream = request.form.get('downstream') == "true"
future = request.form.get('future') == "true"
past = request.form.get('past') == "true"
recursive = request.form.get('recursive') == "true"
only_failed = request.form.get('only_failed') == "true"
dag = dag.sub_dag(
task_regex=r"^{0}$".format(task_id),
include_downstream=downstream,
include_upstream=upstream)
end_date = execution_date if not future else None
start_date = execution_date if not past else None
return self._clear_dag_tis(dag, start_date, end_date, origin,
recursive=recursive, confirmed=confirmed, only_failed=only_failed)
@expose('/dagrun_clear', methods=['POST'])
@has_dag_access(can_dag_edit=True)
@has_access
@action_logging
def dagrun_clear(self):
dag_id = request.form.get('dag_id')
origin = request.form.get('origin')
execution_date = request.form.get('execution_date')
confirmed = request.form.get('confirmed') == "true"
dag = dagbag.get_dag(dag_id)
execution_date = pendulum.parse(execution_date)
start_date = execution_date
end_date = execution_date
return self._clear_dag_tis(dag, start_date, end_date, origin,
recursive=True, confirmed=confirmed)
@expose('/blocked', methods=['POST'])
@has_access
@provide_session
def blocked(self, session=None):
allowed_dag_ids = appbuilder.sm.get_accessible_dag_ids()
if 'all_dags' in allowed_dag_ids:
allowed_dag_ids = [dag_id for dag_id, in session.query(models.DagModel.dag_id)]
# Filter by post parameters
selected_dag_ids = {
unquote(dag_id) for dag_id in request.form.getlist('dag_ids') if dag_id
}
if selected_dag_ids:
filter_dag_ids = selected_dag_ids.intersection(allowed_dag_ids)
else:
filter_dag_ids = allowed_dag_ids
if not filter_dag_ids:
return wwwutils.json_response([])
DR = models.DagRun
dags = (
session.query(DR.dag_id, sqla.func.count(DR.id))
.filter(DR.state == State.RUNNING)
.filter(DR.dag_id.in_(filter_dag_ids))
.group_by(DR.dag_id)
)
payload = []
for dag_id, active_dag_runs in dags:
max_active_runs = 0
dag = dagbag.get_dag(dag_id)
if dag:
# TODO: Make max_active_runs a column so we can query for it directly
max_active_runs = dag.max_active_runs
payload.append({
'dag_id': dag_id,
'active_dag_run': active_dag_runs,
'max_active_runs': max_active_runs,
})
return wwwutils.json_response(payload)
def _mark_dagrun_state_as_failed(self, dag_id, execution_date, confirmed, origin):
if not execution_date:
flash('Invalid execution date', 'error')
return redirect(origin)
execution_date = pendulum.parse(execution_date)
dag = dagbag.get_dag(dag_id)
if not dag:
flash('Cannot find DAG: {}'.format(dag_id), 'error')
return redirect(origin)
new_dag_state = set_dag_run_state_to_failed(dag, execution_date, commit=confirmed)
if confirmed:
flash('Marked failed on {} task instances'.format(len(new_dag_state)))
return redirect(origin)
else:
details = '\n'.join([str(t) for t in new_dag_state])
response = self.render_template(
'airflow/confirm.html',
message=("Here's the list of task instances you are about to mark as failed"),
details=details)
return response
def _mark_dagrun_state_as_success(self, dag_id, execution_date, confirmed, origin):
if not execution_date:
flash('Invalid execution date', 'error')
return redirect(origin)
execution_date = pendulum.parse(execution_date)
dag = dagbag.get_dag(dag_id)
if not dag:
flash('Cannot find DAG: {}'.format(dag_id), 'error')
return redirect(origin)
new_dag_state = set_dag_run_state_to_success(dag, execution_date,
commit=confirmed)
if confirmed:
flash('Marked success on {} task instances'.format(len(new_dag_state)))
return redirect(origin)
else:
details = '\n'.join([str(t) for t in new_dag_state])
response = self.render_template(
'airflow/confirm.html',
message=("Here's the list of task instances you are about to mark as success"),
details=details)
return response
@expose('/dagrun_failed', methods=['POST'])
@has_dag_access(can_dag_edit=True)
@has_access
@action_logging
def dagrun_failed(self):
dag_id = request.form.get('dag_id')
execution_date = request.form.get('execution_date')
confirmed = request.form.get('confirmed') == 'true'
origin = request.form.get('origin')
return self._mark_dagrun_state_as_failed(dag_id, execution_date,
confirmed, origin)
@expose('/dagrun_success', methods=['POST'])
@has_dag_access(can_dag_edit=True)
@has_access
@action_logging
def dagrun_success(self):
dag_id = request.form.get('dag_id')
execution_date = request.form.get('execution_date')
confirmed = request.form.get('confirmed') == 'true'
origin = request.form.get('origin')
return self._mark_dagrun_state_as_success(dag_id, execution_date,
confirmed, origin)
def _mark_task_instance_state(self, dag_id, task_id, origin, execution_date,
confirmed, upstream, downstream,
future, past, state):
        dag = dagbag.get_dag(dag_id)
        if not dag:
            flash("Cannot find DAG: {}".format(dag_id))
            return redirect(origin)
        task = dag.get_task(task_id)
        if not task:
            flash("Cannot find task {} in DAG {}".format(task_id, dag.dag_id))
            return redirect(origin)
        task.dag = dag
        execution_date = pendulum.parse(execution_date)
from airflow.api.common.experimental.mark_tasks import set_state
if confirmed:
altered = set_state(tasks=[task], execution_date=execution_date,
upstream=upstream, downstream=downstream,
future=future, past=past, state=state,
commit=True)
flash("Marked {} on {} task instances".format(state, len(altered)))
return redirect(origin)
to_be_altered = set_state(tasks=[task], execution_date=execution_date,
upstream=upstream, downstream=downstream,
future=future, past=past, state=state,
commit=False)
details = "\n".join([str(t) for t in to_be_altered])
response = self.render_template(
"airflow/confirm.html",
message=("Here's the list of task instances you are about to mark as {}:".format(state)),
details=details)
return response
@expose('/failed', methods=['POST'])
@has_dag_access(can_dag_edit=True)
@has_access
@action_logging
def failed(self):
dag_id = request.form.get('dag_id')
task_id = request.form.get('task_id')
origin = request.form.get('origin')
execution_date = request.form.get('execution_date')
confirmed = request.form.get('confirmed') == "true"
upstream = request.form.get('failed_upstream') == "true"
downstream = request.form.get('failed_downstream') == "true"
future = request.form.get('failed_future') == "true"
past = request.form.get('failed_past') == "true"
return self._mark_task_instance_state(dag_id, task_id, origin, execution_date,
confirmed, upstream, downstream,
future, past, State.FAILED)
@expose('/success', methods=['POST'])
@has_dag_access(can_dag_edit=True)
@has_access
@action_logging
def success(self):
dag_id = request.form.get('dag_id')
task_id = request.form.get('task_id')
origin = request.form.get('origin')
execution_date = request.form.get('execution_date')
confirmed = request.form.get('confirmed') == "true"
upstream = request.form.get('success_upstream') == "true"
downstream = request.form.get('success_downstream') == "true"
future = request.form.get('success_future') == "true"
past = request.form.get('success_past') == "true"
return self._mark_task_instance_state(dag_id, task_id, origin, execution_date,
confirmed, upstream, downstream,
future, past, State.SUCCESS)
@expose('/tree')
@has_dag_access(can_dag_read=True)
@has_access
@gzipped
@action_logging
@provide_session
def tree(self, session=None):
dag_id = request.args.get('dag_id')
blur = conf.getboolean('webserver', 'demo_mode')
dag = dagbag.get_dag(dag_id)
if not dag:
flash('DAG "{0}" seems to be missing.'.format(dag_id), "error")
return redirect(url_for('Airflow.index'))
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_downstream=False,
include_upstream=True)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
if num_runs:
num_runs = int(num_runs)
else:
num_runs = conf.getint('webserver', 'default_dag_run_display_number')
if base_date:
base_date = timezone.parse(base_date)
else:
base_date = dag.latest_execution_date or timezone.utcnow()
DR = models.DagRun
dag_runs = (
session.query(DR)
.filter(
DR.dag_id == dag.dag_id,
DR.execution_date <= base_date)
.order_by(DR.execution_date.desc())
.limit(num_runs)
.all()
)
dag_runs = {
dr.execution_date: alchemy_to_dict(dr) for dr in dag_runs
}
dates = sorted(list(dag_runs.keys()))
max_date = max(dates) if dates else None
min_date = min(dates) if dates else None
tis = dag.get_task_instances(
start_date=min_date, end_date=base_date, session=session)
task_instances = {}
for ti in tis:
task_instances[(ti.task_id, ti.execution_date)] = ti
expanded = set()
# The default recursion traces every path so that tree view has full
# expand/collapse functionality. After 5,000 nodes we stop and fall
# back on a quick DFS search for performance. See PR #320.
node_count = [0]
node_limit = 5000 / max(1, len(dag.leaves))
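        # Illustrative example of the budget above: a DAG with 10 leaf tasks gets
        # 5000 / 10 = 500 nodes; once node_count exceeds that, recurse_nodes below
        # only descends into tasks it has not visited yet.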
def encode_ti(ti):
if not ti:
return None
# NOTE: order of entry is important here because client JS relies on it for
# tree node reconstruction. Remember to change JS code in tree.html
# whenever order is altered.
data = [
ti.state,
ti.try_number,
None, # start_ts
None, # duration
]
if ti.start_date:
# round to seconds to reduce payload size
if six.PY2:
data[2] = int(pendulum.instance(ti.start_date).timestamp())
else:
data[2] = int(ti.start_date.timestamp())
if ti.duration is not None:
data[3] = int(ti.duration)
return data
def recurse_nodes(task, visited):
node_count[0] += 1
visited.add(task)
task_id = task.task_id
node = {
'name': task.task_id,
'instances': [
encode_ti(task_instances.get((task_id, d)))
for d in dates
],
'num_dep': len(task.downstream_list),
'operator': task.task_type,
'retries': task.retries,
'owner': task.owner,
'ui_color': task.ui_color,
}
if task.downstream_list:
children = [
recurse_nodes(t, visited) for t in task.downstream_list
if node_count[0] < node_limit or t not in visited]
# D3 tree uses children vs _children to define what is
# expanded or not. The following block makes it such that
# repeated nodes are collapsed by default.
if task.task_id not in expanded:
children_key = 'children'
expanded.add(task.task_id)
else:
children_key = "_children"
node[children_key] = children
if task.depends_on_past:
node['depends_on_past'] = task.depends_on_past
if task.start_date:
# round to seconds to reduce payload size
if six.PY2:
node['start_ts'] = int(pendulum.instance(task.start_date).timestamp())
else:
node['start_ts'] = int(task.start_date.timestamp())
if task.end_date:
# round to seconds to reduce payload size
if six.PY2:
node['end_ts'] = int(pendulum.instance(task.end_date).timestamp())
else:
node['end_ts'] = int(task.end_date.timestamp())
if task.extra_links:
node['extra_links'] = task.extra_links
if task.run_dag_id:
node['run_dag_id'] = task.run_dag_id
return node
data = {
'name': '[DAG]',
'children': [recurse_nodes(t, set()) for t in dag.roots],
'instances': [
dag_runs.get(d) or {'execution_date': d.isoformat()}
for d in dates
],
}
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
external_logs = conf.get('elasticsearch', 'frontend')
return self.render_template(
'airflow/tree.html',
operators=sorted({op.task_type: op for op in dag.tasks}.values(),
key=lambda x: x.task_type),
root=root,
form=form,
dag=dag,
# avoid spaces to reduce payload size
data=htmlsafe_json_dumps(data, separators=(',', ':')),
blur=blur, num_runs=num_runs,
show_external_logs=bool(external_logs))
@expose('/graph')
@has_dag_access(can_dag_read=True)
@has_access
@gzipped
@action_logging
@provide_session
def graph(self, session=None):
dag_id = request.args.get('dag_id')
# allow_tasks_actions = request.args.get('allow_tasks_actions', 'true') == 'true'
blur = conf.getboolean('webserver', 'demo_mode')
dag = dagbag.get_dag(dag_id)
if not dag:
flash('DAG "{0}" seems to be missing.'.format(dag_id), "error")
return redirect(url_for('Airflow.index'))
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
arrange = request.args.get('arrange', dag.orientation)
nodes = []
edges = []
for task in dag.tasks:
nodes.append({
'id': task.task_id,
'value': {
'label': task.task_id,
'labelStyle': "fill:{0};".format(task.ui_fgcolor),
'style': "fill:{0};".format(task.ui_color),
'rx': 5,
'ry': 5,
}
})
def get_downstream(task):
for t in task.downstream_list:
edge = {
'source_id': task.task_id,
'target_id': t.task_id,
}
if edge not in edges:
edges.append(edge)
get_downstream(t)
for t in dag.roots:
get_downstream(t)
dt_nr_dr_data = get_date_time_num_runs_dag_runs_form_data(request, session, dag)
dt_nr_dr_data['arrange'] = arrange
dttm = dt_nr_dr_data['dttm']
class GraphForm(DateTimeWithNumRunsWithDagRunsForm):
arrange = SelectField("Layout", choices=(
('LR', "Left->Right"),
('RL', "Right->Left"),
('TB', "Top->Bottom"),
('BT', "Bottom->Top"),
))
form = GraphForm(data=dt_nr_dr_data)
form.execution_date.choices = dt_nr_dr_data['dr_choices']
task_instances = {
ti.task_id: alchemy_to_dict(ti)
for ti in dag.get_task_instances(dttm, dttm, session=session)}
# NOTE: Special case when we don't want the actions to be
# performed on dag runs made by DAG Operator.
dagrun = dag.get_dagrun(execution_date=dttm)
allow_tasks_actions = True
if dagrun:
            allow_tasks_actions = not str(dagrun.run_id).startswith(BackfillJob.ID_PREFIX_RUNDAG)
# print(allow_tasks_actions, dagrun.run_id)
tasks = {
t.task_id: {
'dag_id': t.dag_id,
'task_type': t.task_type,
'extra_links': t.extra_links,
'description': t.description,
'run_dag_id': t.run_dag_id,
}
for t in dag.tasks}
for task in task_instances:
task_instances[task]['description'] = tasks[task]['description']
if not tasks:
flash("No tasks found", "error")
session.commit()
doc_md = markdown.markdown(dag.doc_md) \
if hasattr(dag, 'doc_md') and dag.doc_md else ''
external_logs = conf.get('elasticsearch', 'frontend')
return self.render_template(
'airflow/graph.html',
dag=dag,
form=form,
width=request.args.get('width', "100%"),
height=request.args.get('height', "800"),
execution_date=dttm.isoformat(),
state_token=wwwutils.state_token(dt_nr_dr_data['dr_state']),
doc_md=doc_md,
arrange=arrange,
operators=sorted({op.task_type: op for op in dag.tasks}.values(),
key=lambda x: x.task_type),
blur=blur,
root=root or '',
task_instances=task_instances,
tasks=tasks,
nodes=nodes,
edges=edges,
allow_tasks_actions=allow_tasks_actions,
show_external_logs=bool(external_logs))
@expose('/graph-popover')
@has_dag_access(can_dag_read=True)
@has_access
@gzipped
@action_logging
@provide_session
def graph_popover(self, session=None):
'''
Almost a copy of graph method.
This view is used to show the graph preview in a popover for DAGOperator.
'''
dag_id = request.args.get('dag_id')
blur = conf.getboolean('webserver', 'demo_mode')
dag = dagbag.get_dag(dag_id)
if dag_id not in dagbag.dags:
# flash('DAG "{0}" seems to be missing.'.format(dag_id), "error")
return make_response(('DAG not found!!', 200))
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
arrange = request.args.get('arrange', dag.orientation)
nodes = []
edges = []
for task in dag.tasks:
nodes.append({
'id': task.task_id,
'value': {
'label': task.task_id,
'labelStyle': "fill:{0};".format(task.ui_fgcolor),
'style': "fill:{0};".format(task.ui_color),
'rx': 5,
'ry': 5,
}
})
def get_downstream(task):
for t in task.downstream_list:
edge = {
'source_id': task.task_id,
'target_id': t.task_id,
}
if edge not in edges:
edges.append(edge)
get_downstream(t)
for t in dag.roots:
get_downstream(t)
dt_nr_dr_data = get_date_time_num_runs_dag_runs_form_data(request, session, dag)
dt_nr_dr_data['arrange'] = arrange
dttm = dt_nr_dr_data['dttm']
class GraphForm(DateTimeWithNumRunsWithDagRunsForm):
arrange = SelectField("Layout", choices=(
('LR', "Left->Right"),
('RL', "Right->Left"),
('TB', "Top->Bottom"),
('BT', "Bottom->Top"),
))
form = GraphForm(data=dt_nr_dr_data)
form.execution_date.choices = dt_nr_dr_data['dr_choices']
task_instances = {
ti.task_id: alchemy_to_dict(ti)
for ti in dag.get_task_instances(dttm, dttm, session=session)}
tasks = {
t.task_id: {
'dag_id': t.dag_id,
'task_type': t.task_type,
'extra_links': t.extra_links,
'description': t.description,
'run_dag_id': t.run_dag_id,
}
for t in dag.tasks}
if not tasks:
flash("No tasks found", "error")
session.commit()
doc_md = markdown.markdown(dag.doc_md) \
if hasattr(dag, 'doc_md') and dag.doc_md else ''
return self.render_template(
'airflow/graph_popover.html',
dag_id=dag_id,
dag=dag,
form=form,
width=request.args.get('width', "100%"),
height=request.args.get('height', "300"),
execution_date=dttm.isoformat(),
state_token=wwwutils.state_token(dt_nr_dr_data['dr_state']),
doc_md=doc_md,
arrange=arrange,
operators=sorted(
list(set([op.__class__ for op in dag.tasks])),
key=lambda x: x.__name__
),
blur=blur,
root=root or '',
task_instances=task_instances,
tasks=tasks,
nodes=nodes,
edges=edges)
@expose('/duration')
@has_dag_access(can_dag_read=True)
@has_access
@action_logging
@provide_session
def duration(self, session=None):
default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else default_dag_run
if dag is None:
flash('DAG "{0}" seems to be missing.'.format(dag_id), "error")
return redirect(url_for('Airflow.index'))
if base_date:
base_date = pendulum.parse(base_date)
else:
base_date = dag.latest_execution_date or timezone.utcnow()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else timezone.utc_epoch()
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
chart_height = wwwutils.get_chart_height(dag)
chart = nvd3.lineChart(
name="lineChart", x_is_date=True, height=chart_height, width="1200")
cum_chart = nvd3.lineChart(
name="cumLineChart", x_is_date=True, height=chart_height, width="1200")
y = defaultdict(list)
x = defaultdict(list)
cum_y = defaultdict(list)
tis = dag.get_task_instances(
start_date=min_date, end_date=base_date, session=session)
TF = TaskFail
ti_fails = (
session.query(TF)
.filter(TF.dag_id == dag.dag_id, # noqa
TF.execution_date >= min_date,
TF.execution_date <= base_date,
TF.task_id.in_([t.task_id for t in dag.tasks]))
.all() # noqa
)
fails_totals = defaultdict(int)
for tf in ti_fails:
dict_key = (tf.dag_id, tf.task_id, tf.execution_date)
if tf.duration:
fails_totals[dict_key] += tf.duration
for ti in tis:
if ti.duration:
dttm = wwwutils.epoch(ti.execution_date)
x[ti.task_id].append(dttm)
y[ti.task_id].append(float(ti.duration))
fails_dict_key = (ti.dag_id, ti.task_id, ti.execution_date)
fails_total = fails_totals[fails_dict_key]
cum_y[ti.task_id].append(float(ti.duration + fails_total))
# determine the most relevant time unit for the set of task instance
# durations for the DAG
y_unit = infer_time_unit([d for t in y.values() for d in t])
cum_y_unit = infer_time_unit([d for t in cum_y.values() for d in t])
# update the y Axis on both charts to have the correct time units
chart.create_y_axis('yAxis', format='.02f', custom_format=False,
label='Duration ({})'.format(y_unit))
chart.axislist['yAxis']['axisLabelDistance'] = '40'
cum_chart.create_y_axis('yAxis', format='.02f', custom_format=False,
label='Duration ({})'.format(cum_y_unit))
cum_chart.axislist['yAxis']['axisLabelDistance'] = '40'
for task in dag.tasks:
if x[task.task_id]:
chart.add_serie(name=task.task_id, x=x[task.task_id],
y=scale_time_units(y[task.task_id], y_unit))
cum_chart.add_serie(name=task.task_id, x=x[task.task_id],
y=scale_time_units(cum_y[task.task_id],
cum_y_unit))
dates = sorted(list({ti.execution_date for ti in tis}))
max_date = max([ti.execution_date for ti in tis]) if dates else None
session.commit()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
chart.buildcontent()
cum_chart.buildcontent()
s_index = cum_chart.htmlcontent.rfind('});')
cum_chart.htmlcontent = (cum_chart.htmlcontent[:s_index] +
"$( document ).trigger('chartload')" +
cum_chart.htmlcontent[s_index:])
return self.render_template(
'airflow/duration_chart.html',
dag=dag,
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
chart=chart.htmlcontent,
cum_chart=cum_chart.htmlcontent
)
@expose('/tries')
@has_dag_access(can_dag_read=True)
@has_access
@action_logging
@provide_session
def tries(self, session=None):
default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else default_dag_run
if base_date:
base_date = pendulum.parse(base_date)
else:
base_date = dag.latest_execution_date or timezone.utcnow()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else timezone.utc_epoch()
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
chart_height = wwwutils.get_chart_height(dag)
chart = nvd3.lineChart(
name="lineChart", x_is_date=True, y_axis_format='d', height=chart_height,
width="1200")
for task in dag.tasks:
y = []
x = []
for ti in task.get_task_instances(start_date=min_date,
end_date=base_date,
session=session):
dttm = wwwutils.epoch(ti.execution_date)
x.append(dttm)
# y value should reflect completed tries to have a 0 baseline.
y.append(ti.prev_attempted_tries)
if x:
chart.add_serie(name=task.task_id, x=x, y=y)
tis = dag.get_task_instances(
start_date=min_date, end_date=base_date, session=session)
tries = sorted(list({ti.try_number for ti in tis}))
max_date = max([ti.execution_date for ti in tis]) if tries else None
session.commit()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
chart.buildcontent()
return self.render_template(
'airflow/chart.html',
dag=dag,
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
chart=chart.htmlcontent
)
@expose('/landing_times')
@has_dag_access(can_dag_read=True)
@has_access
@action_logging
@provide_session
def landing_times(self, session=None):
default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else default_dag_run
if base_date:
base_date = pendulum.parse(base_date)
else:
base_date = dag.latest_execution_date or timezone.utcnow()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else timezone.utc_epoch()
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
chart_height = wwwutils.get_chart_height(dag)
chart = nvd3.lineChart(
name="lineChart", x_is_date=True, height=chart_height, width="1200")
y = {}
x = {}
for task in dag.tasks:
task_id = task.task_id
y[task_id] = []
x[task_id] = []
for ti in task.get_task_instances(start_date=min_date, end_date=base_date):
ts = ti.execution_date
if dag.schedule_interval and dag.following_schedule(ts):
ts = dag.following_schedule(ts)
if ti.end_date:
dttm = wwwutils.epoch(ti.execution_date)
secs = (ti.end_date - ts).total_seconds()
x[task_id].append(dttm)
y[task_id].append(secs)
# determine the most relevant time unit for the set of landing times
# for the DAG
y_unit = infer_time_unit([d for t in y.values() for d in t])
# update the y Axis to have the correct time units
chart.create_y_axis('yAxis', format='.02f', custom_format=False,
label='Landing Time ({})'.format(y_unit))
chart.axislist['yAxis']['axisLabelDistance'] = '40'
for task in dag.tasks:
if x[task.task_id]:
chart.add_serie(name=task.task_id, x=x[task.task_id],
y=scale_time_units(y[task.task_id], y_unit))
tis = dag.get_task_instances(
start_date=min_date, end_date=base_date, session=session)
dates = sorted(list({ti.execution_date for ti in tis}))
max_date = max([ti.execution_date for ti in tis]) if dates else None
session.commit()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
chart.buildcontent()
return self.render_template(
'airflow/chart.html',
dag=dag,
chart=chart.htmlcontent,
height=str(chart_height + 100) + "px",
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
)
@expose('/paused', methods=['POST'])
@has_dag_access(can_dag_edit=True)
@has_access
@action_logging
@provide_session
def paused(self, session=None):
dag_id = request.args.get('dag_id')
        is_paused = request.args.get('is_paused') == 'false'
models.DagModel.get_dagmodel(dag_id).set_is_paused(
is_paused=is_paused,
store_serialized_dags=STORE_SERIALIZED_DAGS)
return "OK"
@expose('/refresh', methods=['POST'])
@has_dag_access(can_dag_edit=True)
@has_access
@action_logging
@provide_session
def refresh(self, session=None):
DagModel = models.DagModel
dag_id = request.values.get('dag_id')
orm_dag = session.query(
DagModel).filter(DagModel.dag_id == dag_id).first()
if orm_dag:
orm_dag.last_expired = timezone.utcnow()
session.merge(orm_dag)
session.commit()
dag = dagbag.get_dag(dag_id)
# sync dag permission
appbuilder.sm.sync_perm_for_dag(dag_id, dag.access_control)
flash("DAG [{}] is now fresh as a daisy".format(dag_id))
return redirect(request.referrer)
@expose('/gantt')
@has_dag_access(can_dag_read=True)
@has_access
@action_logging
@provide_session
def gantt(self, session=None):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
demo_mode = conf.getboolean('webserver', 'demo_mode')
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
dt_nr_dr_data = get_date_time_num_runs_dag_runs_form_data(request, session, dag)
dttm = dt_nr_dr_data['dttm']
form = DateTimeWithNumRunsWithDagRunsForm(data=dt_nr_dr_data)
form.execution_date.choices = dt_nr_dr_data['dr_choices']
tis = [
ti for ti in dag.get_task_instances(dttm, dttm, session=session)
if ti.start_date and ti.state]
tis = sorted(tis, key=lambda ti: ti.start_date)
TF = TaskFail
ti_fails = list(itertools.chain(*[(
session
.query(TF)
.filter(TF.dag_id == ti.dag_id,
TF.task_id == ti.task_id,
TF.execution_date == ti.execution_date)
.all()
) for ti in tis]))
# determine bars to show in the gantt chart
        # all reschedules of one attempt are combined into one bar
gantt_bar_items = []
for ti in tis:
end_date = ti.end_date or timezone.utcnow()
# prev_attempted_tries will reflect the currently running try_number
# or the try_number of the last complete run
# https://issues.apache.org/jira/browse/AIRFLOW-2143
try_count = ti.prev_attempted_tries
gantt_bar_items.append((ti.task_id, ti.start_date, end_date, ti.state, try_count))
tf_count = 0
try_count = 1
prev_task_id = ""
for tf in ti_fails:
end_date = tf.end_date or timezone.utcnow()
start_date = tf.start_date or end_date
if tf_count != 0 and tf.task_id == prev_task_id:
try_count = try_count + 1
else:
try_count = 1
prev_task_id = tf.task_id
gantt_bar_items.append((tf.task_id, start_date, end_date, State.FAILED, try_count))
tf_count = tf_count + 1
task_types = {}
extra_links = {}
for t in dag.tasks:
task_types[t.task_id] = t.task_type
extra_links[t.task_id] = t.extra_links
tasks = []
for gantt_bar_item in gantt_bar_items:
task_id = gantt_bar_item[0]
start_date = gantt_bar_item[1]
end_date = gantt_bar_item[2]
state = gantt_bar_item[3]
try_count = gantt_bar_item[4]
tasks.append({
'startDate': wwwutils.epoch(start_date),
'endDate': wwwutils.epoch(end_date),
'isoStart': start_date.isoformat()[:-4],
'isoEnd': end_date.isoformat()[:-4],
'taskName': task_id,
                'taskType': task_types[task_id],
'duration': (end_date - start_date).total_seconds(),
'status': state,
'executionDate': dttm.isoformat(),
'try_number': try_count,
                'extraLinks': extra_links[task_id],
})
states = {task['status']: task['status'] for task in tasks}
data = {
'taskNames': [ti.task_id for ti in tis],
'tasks': tasks,
'taskStatus': states,
'height': len(tis) * 25 + 25,
}
session.commit()
return self.render_template(
'airflow/gantt.html',
dag=dag,
execution_date=dttm.isoformat(),
form=form,
data=data,
base_date='',
demo_mode=demo_mode,
root=root,
)
@expose('/extra_links')
@has_dag_access(can_dag_read=True)
@has_access
@action_logging
def extra_links(self):
"""
        A restful endpoint that returns external links for a given Operator.
It queries the operator that sent the request for the links it wishes
to provide for a given external link name.
API: GET
Args: dag_id: The id of the dag containing the task in question
task_id: The id of the task in question
execution_date: The date of execution of the task
link_name: The name of the link reference to find the actual URL for
Returns:
200: {url: <url of link>, error: None} - returned when there was no problem
finding the URL
404: {url: None, error: <error message>} - returned when the operator does
not return a URL
"""
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
link_name = request.args.get('link_name')
dttm = airflow.utils.timezone.parse(execution_date)
dag = dagbag.get_dag(dag_id)
if not dag or task_id not in dag.task_ids:
response = jsonify(
{'url': None,
'error': "can't find dag {dag} or task_id {task_id}".format(
dag=dag,
task_id=task_id
)}
)
response.status_code = 404
return response
task = dag.get_task(task_id)
try:
url = task.get_extra_links(dttm, link_name)
except ValueError as err:
response = jsonify({'url': None, 'error': str(err)})
response.status_code = 404
return response
if url:
response = jsonify({'error': None, 'url': url})
response.status_code = 200
return response
else:
response = jsonify(
{'url': None, 'error': 'No URL found for {dest}'.format(dest=link_name)})
response.status_code = 404
return response
@expose('/_logout')
@action_logging
def logout(self, session=None):
return redirect(appbuilder.get_url_for_logout)
@expose('/_login')
@action_logging
def login(self, session=None):
return redirect(appbuilder.get_url_for_login)
@expose('/object/task_instances')
@has_dag_access(can_dag_read=True)
@has_access
@action_logging
@provide_session
def task_instances(self, session=None):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
dttm = request.args.get('execution_date')
if dttm:
dttm = pendulum.parse(dttm)
else:
return "Error: Invalid execution_date"
task_instances = {
ti.task_id: alchemy_to_dict(ti)
for ti in dag.get_task_instances(dttm, dttm, session=session)}
return json.dumps(task_instances)
class VersionView(AirflowBaseView):
default_view = 'version'
changelogs = None
filepath = settings.CHANGELOG_PATH
found_file = False
try:
        with open(filepath, 'r') as f:
changelogs = yaml.safe_load(f)
found_file = True
except IOError:
pass
@expose('/version')
@has_access
def version(self):
return self.render_template(
'airflow/version.html',
found_file = self.found_file,
changelogs = self.changelogs)
class ConfigurationView(AirflowBaseView):
default_view = 'conf'
@expose('/configuration')
@has_access
def conf(self):
raw = request.args.get('raw') == "true"
title = "Airflow Configuration"
subtitle = configuration.AIRFLOW_CONFIG
# Don't show config when expose_config variable is False in airflow config
if conf.getboolean("webserver", "expose_config"):
with open(configuration.AIRFLOW_CONFIG, 'r') as f:
config = f.read()
table = [(section, key, value, source)
for section, parameters in conf.as_dict(True, True).items()
for key, (value, source) in parameters.items()]
else:
config = (
"# Your Airflow administrator chose not to expose the "
"configuration, most likely for security reasons.")
table = None
if raw:
return Response(
response=config,
status=200,
mimetype="application/text")
else:
code_html = Markup(highlight(
config,
lexers.IniLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render_template(
'airflow/config.html',
pre_subtitle=settings.HEADER + " v" + airflow.__version__,
code_html=code_html, title=title, subtitle=subtitle,
table=table)
class FileUploadBaseView(AirflowBaseView):
'''Generic View for File Upload.'''
__base_url = ''
# NOTE: You can update the below attributes when subclassing this view.
# set template name while using this generic view.
default_view = 'list_view'
template_name = 'airflow/file_upload_base.html'
groups_template_name = 'airflow/file_upload_groups.html'
# group refers to a config group.
default_group = 'default'
regex_valid_groupnames = re.compile('^[A-Za-z0-9_@()-]+$')
accepted_file_extensions = ()
fs_path = None # the path in filesystem where the files should be saved.
title = None
files_editable = False
# TODO: Update URL map a/c to http verbs instead of using path.
# For ex: GET base_url/list should be GET base_url/
# POST base_url/upload should be POST base_url/
# GET base_url/download/filename should be GET base_url/filename
# GET base_url/destroy/filename should be DELETE base_url/filename
# TODO: Instead of using <path:pathname> use <our_own_converter:pathname>
# That converter should allow only one level of subdirectory access.
# https://exploreflask.com/en/latest/views.html#custom-converters
urls_map = {
'list_view': ["/".join(['base_url', 'list', '<path:pathname>']),
"/".join(['base_url', 'list', ''])],
'upload_view': ["/".join(['base_url', 'upload', '<path:pathname>']),
"/".join(['base_url', 'upload', ''])],
'download_view': ["/".join(['base_url', 'download', '<path:pathname>'])],
'destroy_view': ["/".join(['base_url', 'destroy', '<path:pathname>'])],
'edit_view': ["/".join(['base_url', 'edit', '<path:pathname>'])],
}
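    # Illustrative sketch: __init__ below rewrites 'base_url' to the subclass name,
    # so for a subclass named SparkDepView the resulting routes are
    # 'SparkDepView/list/', 'SparkDepView/list/<path:pathname>',
    # 'SparkDepView/upload/', 'SparkDepView/download/<path:pathname>', and so on.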
method_permission_name = {
'list_view': 'access',
'upload_view': 'access',
'download_view': 'access',
'destroy_view': 'access',
'edit_view': 'access'
}
def __init__(self, *args, **kwargs):
base_url = self.__class__.__name__
self.__class__.list_view = copy.deepcopy(self.__class__.list_view)
self.__class__.upload_view = copy.deepcopy(self.__class__.upload_view)
self.__class__.download_view = copy.deepcopy(self.__class__.download_view)
self.__class__.destroy_view = copy.deepcopy(self.__class__.destroy_view)
self.__class__.edit_view = copy.deepcopy(self.__class__.edit_view)
self.__class__.list_view._urls = []
self.__class__.upload_view._urls = []
self.__class__.download_view._urls = []
self.__class__.destroy_view._urls = []
self.__class__.edit_view._urls = []
super().__init__(*args, **kwargs)
for url in self.urls_map['list_view']:
self.__class__.list_view._urls.append(
(url.replace('base_url', base_url), ['GET']))
for url in self.urls_map['upload_view']:
self.__class__.upload_view._urls.append(
(url.replace('base_url', base_url), ['POST']))
for url in self.urls_map['download_view']:
self.__class__.download_view._urls.append(
(url.replace('base_url', base_url), ['GET']))
for url in self.urls_map['destroy_view']:
self.__class__.destroy_view._urls.append(
(url.replace('base_url', base_url), ['GET']))
for url in self.urls_map['edit_view']:
self.__class__.edit_view._urls.append(
(url.replace('base_url', base_url), ['GET', 'POST']))
# self.__class__.list_view._urls.extend(
# (self.urls_map['list_view'].replace('base_url', base_url), ['GET']))
# self.__class__.upload_view._urls.extend(
# (self.urls_map['upload_view'].replace('base_url', base_url), ['POST']))
# self.__class__.download_view._urls.extend(
# (self.urls_map['download_view'].replace('base_url', base_url), ['GET']))
# self.__class__.destroy_view._urls.extend(
# (self.urls_map['destroy_view'].replace('base_url', base_url), ['GET']))
# self.__class__.edit_view._urls.extend(
# (self.urls_map['edit_view'].replace('base_url', base_url), ['GET', 'POST']))
if self.fs_path:
os.makedirs(self.fs_path, exist_ok=True)
# os.makedirs(os.path.join(self.fs_path, self.default_group), exist_ok=True)
@classmethod
def get_base_url(cls):
return cls.__base_url
def get_file_path(self, pathname):
self.check_attr_is_set(self.fs_path)
if isinstance(pathname, str):
path = os.path.join(self.fs_path, pathname)
else:
path = os.path.join(self.fs_path, *pathname)
if self.path_valid(path):
return path
def path_valid(self, path):
        '''Path is valid if it is inside `self.fs_path`.
'''
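        # Illustrative sketch (hypothetical values): with fs_path='/opt/configs',
        #   path_valid('group_a/core-site.xml') -> True   (normalises inside fs_path)
        #   path_valid('../../etc/passwd')      -> raises (normalises outside fs_path)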
norm_fs_path = os.path.normpath(self.fs_path)
norm_path = os.path.normpath(os.path.join(self.fs_path, path))
if norm_fs_path == norm_path[:len(norm_fs_path)]:
return True
        raise Exception('Illegal access: paths outside the base directory are not allowed.')
def check_attr_is_set(self, *args):
# TODO: Refactor this to raise more appropriate error messages.
for attr in args:
if not attr:
                raise Exception('A required attribute is not set. Please set it to an appropriate value.')
def on_save_complete(self):
'''Called when all files uploaded are saved'''
pass
def action_logger(f):
'''
Decorator to log user actions.
Similar to decorator `action_logging` in `decorators.py` but it
logs `f.__class__.__name__` + `f.__name__` as event.
'''
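        # e.g. an upload handled by a subclass named SparkDepView is recorded with
        # event='SparkDepView.upload_view' (truncated to 30 characters below).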
@functools.wraps(f)
def wrapper(*args, **kwargs):
try:
with create_session() as session:
if g.user.is_anonymous:
user = 'anonymous'
else:
user = g.user.username
log = Log(
event="{}.{}".format(args[0].__class__.__name__, f.__name__)[:30],
task_instance=None,
owner=user,
extra=str(list(request.args.items())),
task_id=request.args.get('task_id'),
dag_id=request.args.get('dag_id'),
source_ip=request.environ['REMOTE_ADDR'])
if 'execution_date' in request.args:
log.execution_date = pendulum.parse(
request.args.get('execution_date'))
session.add(log)
return f(*args, **kwargs)
except Exception as e:
print(e)
return wrapper
def extra_files(self, files):
        # Override this method to add other objects to the files dict.
return files
@has_access
@action_logger
def list_view(self, pathname=None):
self.check_attr_is_set(self.fs_path, self.accepted_file_extensions)
if pathname:
path = self.get_file_path(pathname)
else:
path = self.fs_path
# pathname = '' # required for path concatenation in file_upload_base.html
files = self.get_details(path, self.accepted_file_extensions)
files = self.extra_files(files)
return self.render_template(
self.template_name,
files=files,
view=self.__class__.__name__,
accepted_file_extensions=self.accepted_file_extensions,
title=self.title,
files_editable=self.files_editable,
pathname=pathname
)
def extra_work_after_file_save(self, filedest, *args, **kwargs):
return
@has_access
@action_logger
def upload_view(self, pathname=None):
list_files = request.files.getlist("file")
files_uploaded = 0
for upload in list_files:
filename = upload.filename
if filename.endswith(self.accepted_file_extensions):
if pathname:
destination = self.get_file_path([pathname, filename])
else:
destination = self.get_file_path(filename)
upload.save(destination)
# print("Extra work")
self.extra_work_after_file_save(destination)
# print("Extra work done")
try:
AirflowBaseView.audit_logging(
"{}.{}".format(self.__class__.__name__, 'upload_view'),
filename, request.environ['REMOTE_ADDR'])
except Exception:
pass
files_uploaded += 1
else:
                flash('File ' + filename + ' is not allowed', 'error')
if files_uploaded:
self.flash_on_upload_done(files_uploaded)
self.on_save_complete()
return redirect(url_for(self.__class__.__name__ + '.list_view', pathname=pathname))
def flash_on_upload_done(self, files_uploaded):
flash(str(files_uploaded) + ' files uploaded!!', 'success')
@has_access
@action_logger
def edit_view(self, pathname):
'''When `self.files_editable` is set to True, you should override this view'''
return make_response(('BAD_REQUEST', 400))
# raise NotImplementedError('Please implement this in your subclass to be able to edit files.')
@has_access
@action_logger
def download_view(self, pathname):
file_path = self.get_file_path(pathname)
AirflowBaseView.audit_logging(
"{}.{}".format(self.__class__.__name__, 'download_view'),
pathname, request.environ['REMOTE_ADDR'])
return send_file(file_path, as_attachment=True, conditional=True)
@has_access
def destroy_view(self, pathname):
file = Path(self.get_file_path(pathname))
print(file)
if file.exists():
file.unlink()
AirflowBaseView.audit_logging(
"{}.{}".format(self.__class__.__name__, 'destroy_view'),
pathname, request.environ['REMOTE_ADDR'])
flash('File ' + pathname + ' successfully deleted.', category='warning')
else:
flash('File ' + pathname + ' not found.', category='error')
redirect_pathname = Path(pathname).parent.stem
return redirect(url_for(self.__class__.__name__ + '.list_view', pathname=redirect_pathname))
class SparkDepView(FileUploadBaseView):
fs_path = settings.SPARK_DEPENDENCIES_FOLDER
accepted_file_extensions = ('.jar', '.egg', '.zip', '.py')
title = 'Spark Dependencies'
def on_save_complete(self):
flash('To include the file(s) for spark job, select them from spark configuration.', 'success')
class CodeArtifactView(FileUploadBaseView):
fs_path = settings.CODE_ARTIFACTS_FOLDER
accepted_file_extensions = ('.jar', '.egg', '.zip', '.py')
title = 'Code Artifacts'
class_permission_name = 'Code Artifacts'
class TrainedModelsView(FileUploadBaseView):
base_fs_path = settings.MODEL_SERVERS
temp_fs_path = os.path.join(settings.MODEL_SERVERS, 'tmp')
fs_path = settings.MODEL_SERVERS
# accepted_file_extensions = ('.tar', '.tar.gz')
accepted_file_extensions = ('',)
title = 'Pre-trained models and dataset repositories'
class_permission_name = 'Models and Datasets'
method_permission_name = {
'list_view': 'access',
'upload_view': 'access',
'download_view': 'access',
'destroy_view': 'access',
'edit_view': 'access'
}
template_name = 'airflow/tf_file_upload_base.html'
fs_mapping = {
'tf_models': {
'path': os.path.join(base_fs_path, 'tf-models'),
'extract_on_upload': True,
'update_config': True,
'accept_extensions': ('.tar', '.gz')
},
'spark_models': {
'path': os.path.join(base_fs_path, 'spark-models'),
'extract_on_upload': False,
'update_config': False,
'accept_extensions': ('.tar', '.gz')
},
'other_models': {
'path': os.path.join(base_fs_path, 'other-models'),
'extract_on_upload': True,
'update_config': False,
'accept_extensions': ('.tar', '.gz')
},
'datasets': {
'path': os.path.join(base_fs_path, 'datasets'),
'extract_on_upload': True,
'update_config': False,
'accept_extensions': ('.tar', '.gz')
}
}
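    # These keys are also the destination selectors sent by the client:
    # get_file_path() and extra_work_after_file_save() below index fs_mapping with
    # the path segment supplied in the request to decide where a chunked upload is
    # assembled, whether it is extracted, and whether models.config is regenerated.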
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for key, val in self.fs_mapping.items():
os.makedirs(val['path'], exist_ok=True)
def flash_on_upload_done(self, files_uploaded):
return
@has_access
def list_view(self, pathname=None):
# THIS view should return the same content irrespective of pathname
if pathname:
return redirect(url_for(self.__class__.__name__ + '.list_view', pathname=''))
files = {}
for key, val in self.fs_mapping.items():
# print(key, val)
files[key] = {}
pth = val['path']
# print(pth)
files[key] = self.get_details(pth, ('', ))
files[key] = self.extra_files(files[key], pth)
# print("KEY====>>>>", key, files[key])
# for i in files:
# print(files[i])
return self.render_template(
self.template_name,
files=files,
view=self.__class__.__name__,
title=self.title,
files_editable=False,
pathname=None,
fs_mapping=self.fs_mapping,
max_chunk_size=settings.MAX_CHUNK_SIZE,
max_file_size=settings.MAX_FILE_SIZE
)
@csrf.exempt
@has_access
def upload_view(self, pathname=None):
# print(request.form)
file = request.files['file']
temp_save_path = os.path.join(self.temp_fs_path, request.form['dzuuid'])
current_chunk = int(request.form['dzchunkindex'])
def create_chunks_folder():
os.makedirs(temp_save_path, exist_ok=True)
create_chunks_folder()
        logging.info('{} chunk received, chunk index {}'.format(
file.filename,
current_chunk)
)
# save the chunk using its index
# TODO: check if lock is required here.
with open(os.path.join(temp_save_path, str(current_chunk)), 'wb') as f:
f.write(file.stream.read())
return make_response(('Chunk {} saved.'.format(current_chunk), 200))
@has_access
# @action_logging
@expose('/TrainedModelsView/extract/', methods=['POST'])
@expose('/TrainedModelsView/extract/<path:pathname>', methods=['POST'])
@csrf.exempt
def extract_view(self, pathname=None):
# return make_response(('Thread to combine files has started.', 400))
temp_save_path = os.path.join(self.temp_fs_path, request.form['dzuuid'])
file = request.form['file']
total_chunks = int(request.form['totalChunkCount'])
# print(pathname)
AirflowBaseView.audit_logging(
'TrainedModelsView.extract',
file,
request.environ['REMOTE_ADDR'])
combine_chunks_thread = threading.Thread(
target=self.extra_work_after_file_save,
args=(temp_save_path, file, total_chunks, pathname))
combine_chunks_thread.start()
return make_response(('Thread to combine files has started.', 200))
def extra_files(self, files, fs_path=None):
'''
        This method adds extra files which need to be shown, and removes previously
        added files which should not be shown in the UI.
'''
# print(fs_path)
if not fs_path:
fs_path = self.fs_path
# also removing `models.config` which we don't want to show
dirs = [name for name in os.listdir(fs_path) if os.path.isdir(os.path.join(fs_path, name))]
# print("Dirs ===>>>", dirs)
for d in dirs:
fileStatsObj = os.stat(os.path.join(fs_path, d))
modificationTime = time.ctime(fileStatsObj[stat.ST_MTIME]) # get last modified time
size_bytes = fileStatsObj.st_size
size = AirflowBaseView.convert_size(size_bytes)
temp_dict = {'time': modificationTime.split(' ', 1)[1], 'size': size, 'dir': True}
files[d] = temp_dict
# remove models.config
for file in list(files.keys()):
if file.endswith('.config'):
files.pop(file, None)
return files
def set_config(self, config, pathname):
try:
with open(os.path.join(pathname, 'models.config'), 'w') as f:
f.write(config)
except Exception as e:
print(e)
def get_file_path(self, pathname):
if isinstance(pathname, list):
fs_path = self.fs_mapping.get(pathname[0], self.fs_mapping['other_models'])['path']
# print("FS_PATh ====>", fs_path)
return os.path.join(fs_path, pathname[1])
else:
return os.path.join(self.fs_path, pathname)
def update_models_config(self, pathname):
config = 'model_config_list: {'
dirs = [name for name in os.listdir(pathname) if os.path.isdir(os.path.join(pathname, name))]
for dname in dirs:
# print(dname)
config += '''config: {
name: "%s",
base_path: "/usr/local/couture/trained-models/%s/%s",
model_platform: "tensorflow"
},''' % (dname, 'tf-models', dname)
config += '}'
# flash('All Model servers have been updated')
self.set_config(config, pathname)
AirflowBaseView.audit_logging(
'TrainedModelsView.update_models_config',
extra='',
source_ip=request.environ['REMOTE_ADDR'])
def extra_work_after_file_save(self, temp_save_path, file, total_chunks, pathname):
# TODO: MODIFY THIS HACK.
fs_key = pathname
fs_path = self.fs_mapping[fs_key]['path']
pathname = os.path.join(fs_path, file)
# TODO: check if lock or semaphore is required here.
        # assuming all chunks exist:
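        # The chunks were written by upload_view as files named '0'..'<total_chunks-1>'
        # inside temp_save_path, so they are reassembled here in index order.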
with open(pathname, 'wb') as f:
for chunk_index in range(0, total_chunks):
with open(os.path.join(temp_save_path, str(chunk_index)), 'rb') as chunk:
f.write(chunk.read())
# delete the temp files
shutil.rmtree(temp_save_path)
logging.info(f'Final Size of file {file}: {os.path.getsize(pathname)}')
if not self.fs_mapping[fs_key]['extract_on_upload']:
return
if str(pathname).endswith(('.tar', '.tar.gz')):
file_path = self.get_file_path(pathname)
with tarfile.open(Path(file_path)) as tar:
for content in tar:
try:
tar.extract(content, path=fs_path)
except Exception as e:
print(e)
existing_content = Path(fs_path).joinpath(content.name)
if existing_content.is_dir():
shutil.rmtree(existing_content)
else:
existing_content.unlink()
tar.extract(content, path=fs_path)
flash('{} already exists, overwriting'.format(content.name), category='warning')
# # after extracting, update models.config
# if existing_content.isdir():
finally:
content_path = Path(fs_path).joinpath(content.name)
os.chmod(content_path, content.mode)
# flash('Extracted {}'.format(pathname))
Path(str(pathname)).unlink()
if self.fs_mapping[fs_key]['update_config']:
self.update_models_config(fs_path)
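# Minimal sketch of the chunk reassembly performed above (illustrative only;
# assumes the upload view stored the chunks as files named "0".."N-1" under
# temp_save_path):
#
# def combine_chunks(temp_save_path, target_path, total_chunks):
#     with open(target_path, 'wb') as out:
#         for i in range(total_chunks):
#             with open(os.path.join(temp_save_path, str(i)), 'rb') as chunk:
#                 out.write(chunk.read())
#     shutil.rmtree(temp_save_path)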
@has_access
# @action_logger
def download_view(self, pathname):
file_path = self.get_file_path(str(pathname).split('/'))
arcname = pathname.split('/')[-1]
if os.path.isdir(file_path):
f = tempfile.SpooledTemporaryFile(suffix='.tar.gz')
with tarfile.open(fileobj=f, mode='w:gz') as tar:
tar.add(file_path, arcname=arcname)
f.flush()
f.seek(0)
AirflowBaseView.audit_logging(
'TrainedModelsView.download_view',
arcname,
request.environ['REMOTE_ADDR'])
return send_file(f,
as_attachment=True,
conditional=True,
attachment_filename='{}.tar.gz'.format(pathname),
mimetype='application/gzip')
return send_file(file_path, as_attachment=True, conditional=True)
@has_access
def destroy_view(self, pathname):
file = Path(self.get_file_path(str(pathname).split('/')))
if file.exists() and file.is_file():
file.unlink()
flash('File ' + pathname + ' successfully deleted.', category='warning')
elif file.exists() and file.is_dir():
shutil.rmtree(file)
flash('Folder ' + pathname + ' successfully deleted.', category='warning')
pth = pathname.split('/')[0]
if self.fs_mapping[pth]['update_config']:
self.update_models_config(self.fs_mapping[pth]['path'])
else:
flash('File/Folder ' + pathname + ' not found.', category='error')
return redirect(url_for(self.__class__.__name__ + '.list_view', pathname=''))
class HadoopConfView(FileUploadBaseView):
# TODO: Merge this with file upload baseview.
default_view = 'groups_view'
fs_path = settings.HADOOP_CONFIGS_FOLDER
accepted_file_extensions = ('.xml', )
title = 'Spark Hadoop Config Groups'
files_editable = True
groups_template_name = 'airflow/hadoop_upload_groups.html'
def get_default_group_path(self):
try:
default_group_p = os.readlink(os.path.join(self.fs_path, self.default_group))
except FileNotFoundError:
default_group_p = ""
if not os.path.isabs(default_group_p):
default_group_p = os.path.join(self.fs_path, default_group_p)
default_group_p = os.path.normpath(default_group_p)
return default_group_p
def get_groups(self):
groups = []
default_group_name = None
default_group_p = self.get_default_group_path()
for f in os.scandir(self.fs_path):
if f.is_dir() and f.name != self.default_group:
if os.path.normpath(f.path) != default_group_p:
groups.append([f, False])
else:
groups.append([f, True])
default_group_name = f.name
return groups, default_group_name
def _change_default_group(self, groupname):
try:
try:
os.remove(os.path.join(self.fs_path, self.default_group))
except OSError:
pass
norm_group_path = os.path.normpath(os.path.join(self.fs_path, groupname))
# Creating a relative symlink here instead of an absolute one
os.symlink(os.path.relpath(norm_group_path, self.fs_path),
os.path.join(self.fs_path, self.default_group))
# os.symlink(norm_group_path,
# os.path.join(self.fs_path, self.default_group))
AirflowBaseView.audit_logging(
"{}.{}".format(self.__class__.__name__, 'change_default_group'),
groupname, request.environ['REMOTE_ADDR'])
except Exception:
# print(e, type(e))
pass
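# Illustrative layout after switching the default group to "prod" (assuming
# self.default_group is the name of the symlink, e.g. "default"):
#   <fs_path>/prod/core-site.xml
#   <fs_path>/default -> prod        (relative symlink created above)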
@expose('/HadoopConfView/change-default-group/', methods=['GET'])
@has_access
@FileUploadBaseView.action_logger
def change_default_group(self):
groupname = request.args.get('group')
self._change_default_group(groupname)
return redirect(url_for('HadoopConfView.groups_view'))
@expose('/HadoopConfView/delete-group/<string:groupname>', methods=['GET'])
@has_access
@FileUploadBaseView.action_logger
def delete_group(self, groupname):
default_group_p = self.get_default_group_path()
norm_group_path = os.path.normpath(os.path.join(self.fs_path, groupname))
if default_group_p == norm_group_path:
flash('Cannot delete the default group. Change default group first.', category='warning')
else:
shutil.rmtree(norm_group_path, ignore_errors=True)
AirflowBaseView.audit_logging(
"{}.{}".format(self.__class__.__name__, 'delete_group'),
groupname, request.environ['REMOTE_ADDR'])
return redirect(url_for('HadoopConfView.groups_view'))
@expose('/HadoopConfView/groups/', methods=['GET', 'POST'])
@has_access
@FileUploadBaseView.action_logger
def groups_view(self):
groups = []
default_group_name = None
if request.method == 'GET':
if not os.path.islink(os.path.join(self.fs_path, self.default_group)):
flash('No Default Hadoop Config Group set', category='warning')
groups, default_group_name = self.get_groups()
return self.render_template(
self.groups_template_name,
groups=groups,
view=self.__class__.__name__,
accepted_file_extensions=self.accepted_file_extensions,
title=self.title,
default_group=default_group_name
)
else:
name = request.form.get('name')
if name and self.regex_valid_groupnames.match(name):
os.makedirs(os.path.join(self.fs_path, name), exist_ok=True)
# copying default spark config.
shutil.copyfile(settings.SAMPLE_SPARK_CONF_PATH, os.path.join(self.fs_path, *[name, 'couture-spark.conf']))
groups, _ = self.get_groups()
if len(groups) <= 1:
self._change_default_group(name)
# making this the default group.
flash('Group added !', category='success')
AirflowBaseView.audit_logging(
"{}.{}".format(self.__class__.__name__, 'add_group'),
name, request.environ['REMOTE_ADDR'])
else:
flash('Invalid group name provided !', category='error')
return redirect(url_for('HadoopConfView.groups_view'))
@has_access
@action_logging
def edit_view(self, pathname):
from lxml import etree as ET
UPLOAD_FOLDER = self.fs_path
if request.method == 'GET':
xml_file = os.path.join(UPLOAD_FOLDER, pathname)
self.path_valid(xml_file)
tree = ET.parse(xml_file)
root = tree.getroot()
rootname = root.tag
values_get = {}
for p in root.iter('property'):
name = p.find('name').text
value = p.find('value').text
values_get[name] = value # storing all the name and value pairs in values_get dictionary
return self.render_template('airflow/hadoop_conn_file.html',
Section=rootname,
Configurations=values_get,
pathname=pathname)
if request.method == 'POST':
xml_file = os.path.join(UPLOAD_FOLDER, pathname)
self.path_valid(xml_file)
tree = ET.parse(xml_file)
root = tree.getroot()
values = {}
for p in root.iter('property'):
name = p.find('name').text
value = p.find('value').text
values[name] = value  # storing all the name and value pairs in the values dictionary
for prop in root.iter('property'):
name = prop.find('name').text
value = prop.find('value').text
new_value = request.form[name] # extracting updated values from the form
prop.find('value').text = str(new_value) # for saving edit changes in file
for key in request.form:
if key.startswith('new-config-key-') and request.form[key]:
key_no = key.split('-')[-1]
prop = ET.Element("property")
root.append(prop)
nm = ET.SubElement(prop, "name")
nm.text = request.form[key]
val = ET.SubElement(prop, "value")
val.text = request.form['new-config-value-' + key_no]
del_name = request.form.get('option_title_config_delete') # for deleting a property from file
if del_name:
for p in root.iter('property'):
n = p.find('name').text
if n == del_name:
root.remove(p)
tree.write(xml_file)  # writing all the updated changes back to the file
return redirect(url_for('HadoopConfView.edit_view', pathname=pathname))
class ExportConfigsView(AirflowBaseView, BaseApi):
_configs_path = [
settings.HADOOP_CONFIGS_FOLDER,
# settings.SPARK_CONF_PATH,
]
_dependencies_path = [
settings.SPARK_DEPENDENCIES_FOLDER,
]
_dags_path = [
settings.DAGS_FOLDER
]
@staticmethod
def filter_files(tarinfo):
# TODO: Filter files here.
return tarinfo
# if str(tarinfo.name).endswith('.py') .....
@staticmethod
def moveTree(sourceRoot, destRoot):
# COPY whole dirs(only) present in sourceRoot.
if not os.path.exists(destRoot):
return False
dirs = [name for name in os.listdir(sourceRoot) if os.path.isdir(os.path.join(sourceRoot, name))]
for folder in dirs:
if not os.path.islink(os.path.join(destRoot, folder)):
shutil.rmtree(os.path.join(destRoot, folder), ignore_errors=True)
else:
os.unlink(os.path.join(destRoot, folder))
# else:
# print("folder:::", folder)
if os.path.islink(os.path.join(sourceRoot, folder)):
os.symlink(os.readlink(os.path.join(sourceRoot, folder)), os.path.join(destRoot, folder))
else:
shutil.copytree(os.path.join(sourceRoot, folder),
os.path.join(destRoot, folder),
copy_function=shutil.copy,
symlinks=True)
files = [name for name in os.listdir(sourceRoot) if os.path.isfile(os.path.join(sourceRoot, name))]
for file in files:
shutil.copy(os.path.join(sourceRoot, file),
os.path.join(destRoot, file))
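# Behaviour sketch of moveTree above (descriptive, not exhaustive): every
# top-level directory of sourceRoot replaces its counterpart in destRoot
# (symlinks are re-created as symlinks, real directories are copied), top-level
# files are copied over, and anything else already present in destRoot is left
# untouched.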
def export(self, export_paths, name='configs'):
f = tempfile.SpooledTemporaryFile(suffix='.tar.gz')
airflow_home_parent = os.path.normpath(os.path.join(AIRFLOW_HOME, os.pardir))
root_dir = Path(name)
with tarfile.open(fileobj=f, mode='w:gz') as tar:
for path in export_paths:
try:
tar.add(path,
arcname=str(root_dir.joinpath(Path(path).relative_to(airflow_home_parent))),
filter=self.filter_files)
except FileNotFoundError:
pass
f.flush()
f.seek(0)
return send_file(f,
as_attachment=True,
conditional=True,
attachment_filename='{}.tar.gz'.format(name),
mimetype='application/gzip')
def imprt(self, import_paths, tar):
# NOTE: All folder imports are from AIRFLOW_HOME/../
# The sent files should be overwritten and the rest files already
# present in the filesystem should be preserved.
if not tar:
return self.response_400('Missing "sources"')
airflow_home_parent = Path(AIRFLOW_HOME).parent
with tempfile.TemporaryDirectory() as tmpdir:
tarfile.open(fileobj=tar, mode='r:gz').extractall(path=tmpdir)
contents = [name for name in os.listdir(tmpdir) if os.path.isdir(os.path.join(tmpdir, name))]
if not contents:
return self.response_400(
message="No root directory found. Keep a root directory.")
elif len(contents) > 1:
return self.response_400(
message="Multiple directories in root found while extracting.\
Keep only one root directory.")
# go inside the first directory.
root_dir = Path(contents[0])
for path in import_paths:
path_in_tempdir = os.path.join(tmpdir,
*[root_dir, Path(path).relative_to(airflow_home_parent)])
if os.path.isfile(path) and os.path.exists(path_in_tempdir):
shutil.copyfile(path_in_tempdir, path, follow_symlinks=True)
elif os.path.isdir(path) and os.path.exists(path_in_tempdir):
self.moveTree(path_in_tempdir, path)
return self.response(200, message="OK")
@expose('/configs/', methods=['GET', 'POST'])
# @protect(allow_browser_login=True)
@csrf.exempt
def configs(self):
# TODO: enable authentication.
if request.method == 'GET':
return self.export(self._configs_path)
if request.method == 'POST':
return self.imprt(self._configs_path, tar=request.files.get('sources'))
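# Example client usage of the export/import endpoints above (illustrative;
# assumes the endpoint is exposed at http://localhost:8080/configs/, that
# `requests` is installed, and that no authentication is enforced, since the
# @protect decorator above is commented out):
#
# import requests
# # export the configured folders as a tar.gz archive
# resp = requests.get('http://localhost:8080/configs/')
# with open('configs.tar.gz', 'wb') as f:
#     f.write(resp.content)
# # import them back; the archive must contain exactly one root directory
# with open('configs.tar.gz', 'rb') as f:
#     requests.post('http://localhost:8080/configs/', files={'sources': f})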
@expose('/dependencies/', methods=['GET', 'POST'])
# @protect(allow_browser_login=True)
@csrf.exempt
def dependencies(self):
# TODO: enable authentication.
if request.method == 'GET':
return self.export(self._dependencies_path, name='dependencies')
if request.method == 'POST':
return self.imprt(self._dependencies_path, tar=request.files.get('sources'))
@expose('/dags/', methods=['GET', 'POST'])
# @protect(allow_browser_login=True)
@csrf.exempt
def dags(self):
# TODO: enable authentication.
if request.method == 'GET':
return self.export(self._dags_path, name='dags')
if request.method == 'POST':
return self.imprt(self._dags_path, tar=request.files.get('sources'))
class EDAView(AirflowBaseView, BaseApi):
default_view = 'source_view'
output_path = os.path.join(settings.EDA_HOME, *['outputs'])
temp_save_path = os.path.join(settings.EDA_HOME, *['tmp'])
hdfs_path = '/data/eda/raw/inputfiles/'
sources_key = 'EDA_Sources'
class_permission_name = 'Exploratory data analysis'
method_permission_name = {
'source_view': 'access',
'source_destroy_view': 'access',
'list_outputs_view': 'access',
'dashboard_view': 'access'
}
def __init__(self, *args, **kwargs):
os.makedirs(self.output_path, exist_ok=True)
os.makedirs(self.temp_save_path, exist_ok=True)
super().__init__(*args, **kwargs)
def get_sources(self):
return models.EdaSource.all_sources()
def get_outputs(self):
outputs_folder = Path(self.output_path)
# Use os.scandir() instead ?
tree = []
curr = 0
dir_node_id = {}
for dirpath, dirs, files in os.walk(self.output_path):
root = Path(dirpath).stem
tree.append({'id': 'node-{}'.format(curr),
'state': {
'opened': 'true'},
'parent': '#' if not curr else dir_node_id[Path(dirpath).parts[-2]],
'text': root})
dir_node_id[root] = 'node-{}'.format(curr)
curr += 1
for file in files:
if file.endswith(('.htm', '.html')):
fpath = Path(os.path.join(dirpath, file))
tree.append({'id': 'node-{}'.format(curr),
'parent': dir_node_id[root],
'text': file,
'type': 'dashboard',
'a_attr': {
'href': url_for('EDAView.dashboard_view',
filename=fpath.relative_to(outputs_folder))
}})
curr += 1
return tree
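# Illustrative shape of the tree returned above, as rendered by the sources
# template (href shortened):
# [
#   {'id': 'node-0', 'state': {'opened': 'true'}, 'parent': '#', 'text': 'outputs'},
#   {'id': 'node-1', 'parent': 'node-0', 'text': 'summary.html', 'type': 'dashboard',
#    'a_attr': {'href': '/eda/dashboard/.../'}},
# ]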
def add_source(self, conn_uri, source_type, tablename=None):
source_type = models.EdaSourcesEnum(source_type)
models.EdaSource.add_source(conn_uri, source_type, tablename)
# sources = set(self.get_sources())
# sources.add(source)
# sources = list(sources)
# models.Variable.set(self.sources_key, sources, serialize_json=True)
def remove_source(self, source):
models.EdaSource.remove_source(source)
@expose('/eda/sources/', methods=['GET', 'POST'])
@has_access
@csrf.exempt
@action_logging
def source_view(self):
if request.method == "GET":
sources = self.get_sources()
outputs = json.dumps(self.get_outputs())
return self.render_template('eda/eda_sources.html',
sources=sources,
source_types=models.EdaSourcesEnum,
outputs=outputs)
conn_uri = request.form.get('path')
if not conn_uri and not request.files.get('file'):
flash('Invalid source.', category='warning')
elif request.form.get('type') == models.EdaSourcesEnum.hdfs.value:
self.add_source(conn_uri, source_type=models.EdaSourcesEnum.hdfs.value)
flash('Source Added.')
else:
# csv file. Trigger a run immediately.
file = request.files.get('file')
# dest = os.path.join(self.temp_save_path, file.filename)
# file.save(dest)
# self.add_source_file(file)
dest = os.path.join(self.temp_save_path, file.filename)
file.save(dest)
# add_source_file_thread = threading.Thread(
# target=self.add_source_file,
# args=(dest, self.hdfs_path))
# add_source_file_thread.start()
# flash(f'File will be copied to HDFS shortly. \
# Trigger EDA on this file at {self.hdfs_path}{file.filename}.')
# move data to hdfs
# just mock the eda source
eda_source = models.EdaSource(connection_uri=dest, source_type='local')
dag_ids = self.create_eda_dags(eda_source, runtype='L0')
_parse_dags(update_DagModel=True)
for dag_id in dag_ids:
unpause_dag(dag_id)
return redirect(url_for('EDAView.source_view'))
# def add_source_file(self, file, hdfs_path):
# hdfs_fileloc = move_to_hdfs(file, hdfs_path)
# # move_to_hdfs_thread = threading.Thread(
# # target=move_to_hdfs,
# # args=(os.path.join(self.temp_save_path, file.filename)))
# # move_to_hdfs_thread.start()
# # TODO: This is done because of the way the hdfs path is handled in EDA DAGs.
# # Check if it is the correct way.
# if str(hdfs_fileloc).startswith('/'):
# hdfs_fileloc = 'hdfs:/' + hdfs_fileloc
# else:
# hdfs_fileloc = 'hdfs://' + hdfs_fileloc
# # print(os.stat(dest))
# self.add_source(hdfs_fileloc, source_type=models.EdaSourcesEnum.hdfs.value)
def create_eda_dags(self, eda_source, runtype):
username = g.user.username
now = datetime.now()
if runtype == 'L0':
eda_type = 'preliminary'
eda_dag_id_prefix = 'EDAPreliminaryDataSummary'
viz_dag_id_prefix = 'EDAPreliminaryVisualisation'
eda_dag_template = 'dags/default_EDA_preliminary_data_summary.jinja2'
viz_dag_template = 'dags/default_EDA_preliminary_visualisations.jinja2'
else:
# L1 type EDA
eda_type = 'data_importance'
eda_dag_id_prefix = 'EDADataScoreAndFeatureSummary'
viz_dag_id_prefix = 'EDADataScoreAndFeatureVisualisation'
eda_dag_template = 'dags/default_EDA_datascore_and_feature_summary.jinja2'
viz_dag_template = 'dags/default_EDA_datascore_and_feature_visualisation.jinja2'
if eda_source.source_type == models.EdaSourcesEnum.database:
output_dirs = [
f'{Path(eda_source.connection_uri).resolve().stem}-{eda_source.tablename}',
eda_type,
now.strftime('%Y-%m-%d-%H:%M:%S')
]
# for hdfs and csv files
else:
output_dirs = [
Path(eda_source.connection_uri).resolve().stem,
eda_type,
now.strftime('%Y-%m-%d-%H:%M:%S')
]
# folder_to_copy_sum is an intermediate directory
folder_to_copy_sum = "-".join([
Path(eda_source.connection_uri).stem,
now.strftime("%d-%m-%Y-%H-%M-%S")])
eda_dag_id = "-".join([
eda_dag_id_prefix,
Path(eda_source.connection_uri).resolve().stem,
now.strftime("%d-%m-%Y-%H-%M-%S")])
code = self.render_template(eda_dag_template,
username=username,
dag_id=eda_dag_id,
source=eda_source,
eda_sources_enum=models.EdaSourcesEnum,
folder_to_copy_sum=folder_to_copy_sum,
now=now)
with open(os.path.join(settings.DAGS_FOLDER, eda_dag_id + '.py'), 'w') as dag_file:
dag_file.write(code)
viz_dag_id = "-".join([
viz_dag_id_prefix,
Path(eda_source.connection_uri).resolve().stem,
now.strftime("%d-%m-%Y-%H-%M-%S")])
code = self.render_template(viz_dag_template,
username=username,
dag_id=viz_dag_id,
source=eda_source,
output_dirs=output_dirs,
eda_dag_id=eda_dag_id,
folder_to_copy_sum=folder_to_copy_sum,
now=now)
with open(os.path.join(settings.DAGS_FOLDER, viz_dag_id + '.py'), 'w') as dag_file:
dag_file.write(code)
flash_msg = '{} EDA run on Source: {} has been scheduled. '.format(eda_type, str(eda_source)) + \
'Output will be found in "{}" directory.'.format('/'.join(output_dirs))
flash(flash_msg)
return (viz_dag_id, eda_dag_id)
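# Illustrative identifiers generated above for a CSV source "sales.csv"
# submitted on 2021-01-01 10:00:00 with runtype 'L0':
#   eda_dag_id  = 'EDAPreliminaryDataSummary-sales-01-01-2021-10-00-00'
#   viz_dag_id  = 'EDAPreliminaryVisualisation-sales-01-01-2021-10-00-00'
#   output dirs = 'sales/preliminary/2021-01-01-10:00:00'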
@expose('/eda/run/<int:source>', methods=['GET', 'POST'])
@has_access
@csrf.exempt
@action_logging
def run_view(self, source):
eda_source = models.EdaSource.get_by_id(source_id=source)
dag_ids = self.create_eda_dags(eda_source, runtype=request.args.get('type', 'L0'))
# TODO: DO _parse_dags in a separate thread.
_parse_dags(update_DagModel=True)
for dag_id in dag_ids:
unpause_dag(dag_id)
return redirect(url_for('EDAView.source_view'))
@expose('/eda/api/', methods=['POST'])
# @has_access
@csrf.exempt
def source_view_api(self, **kwargs):
# TODO: Add authentication on this view.
# NOTE: if we create more APIs we may want to switch to marshmallow
# for data validation
# print(request.form)
conn_uri = request.form.get('connection_uri')
tablename = request.form.get('tablename')
source_type = request.form.get('source_type')
source_types = [tipe.value for tipe in models.EdaSourcesEnum]
if not conn_uri:
return self.response_400(message="Missing 'connection_uri' in body.")
if not source_type:
return self.response_400(message="Missing 'source_type' in body.")
if source_type not in source_types:
return self.response_400(message="Invalid 'source_type' in body.")
self.add_source(conn_uri, source_type, tablename)
return self.response(201)
@expose('/eda/<path:source>/delete/', methods=['GET', 'POST'])
@has_access
@action_logging
def source_destroy_view(self, source):
try:
self.remove_source(source)
flash('Source removed')
except KeyError:
flash('Source error, {} doesn\'t exist'.format(source))
return redirect(url_for('EDAView.source_view'))
@expose('/eda/<path:source>/', methods=['GET'])
@has_access
@action_logging
def list_outputs_view(self, source):
files = []
dir_contents = os.listdir(self.output_path)
for content in dir_contents:
if content.endswith(('.htm', '.html',)):
files.append(content)
return self.render_template('eda/eda_list.html', files=files, source=source)
@expose('/eda/dashboard/<path:filename>/', methods=['GET'])
@has_access
@action_logging
def dashboard_view(self, filename):
viz = ''
try:
with open(os.path.join(self.output_path, filename)) as f:
viz = f.read()
except Exception:
pass
return self.render_template('eda/eda_outputs.html', visualisations=viz)
class SparkConfView(AirflowBaseView):
default_view = 'update_spark_conf'
@expose('/couture_config/<string:group>', methods=['GET', 'POST'])
@has_access
@action_logging
def update_spark_conf(self, group):
# print(request.form)
title = "Couture Spark Configuration"
import collections
import configparser as CP
config = CP.ConfigParser()
config.optionxform = str
conf_path = os.path.join(settings.HADOOP_CONFIGS_FOLDER, *[group, 'couture-spark.conf'])
setup_path = os.path.join(settings.AIRFLOW_HOME, *[os.pardir, 'jars'])
keytab_path = os.path.join(settings.HADOOP_CONFIGS_FOLDER, *[group, 'keytab'])
if os.path.exists(conf_path):
config.read(filenames=conf_path)
# orderedDictionary used so that the order displayed is same as in file
args = collections.OrderedDict(config.items('arguments'))
configs = collections.OrderedDict(config.items('configurations')) # dictionary created
else:
config.add_section('arguments')
config.add_section('configurations')
args = collections.OrderedDict()
configs = collections.OrderedDict()
files = []
py_files = []
# QUESTION(@ANU): Do we need os.walk here ??
for r, d, f in os.walk(setup_path):
for file in f:
if file.endswith(".jar"):
files.append(file)
if file.endswith(".py") or file.endswith(".egg") or file.endswith(".zip"):
py_files.append(file)
kt_files = []
for r, d, f in os.walk(keytab_path):
for file in f:
kt_files.append(file)
if request.method == 'POST':
config.read(filenames=conf_path)
# orderedDictionary used so that the order displayed is same as in file
args = collections.OrderedDict(config.items('arguments'))
configs = collections.OrderedDict(config.items('configurations')) # dictionary created
# print(request.form.getlist('check'))
# print(request.form.getlist('kt_check'))
# print(request.form.getlist('py_check'))
for i in args:
if i != 'jars' and i != 'py-files' and i != 'keytab':
config.set('arguments', i, request.form[i])
elif i == 'jars': # if the field is jars
list_file = []
filenames = request.form.getlist('check')
for f in filenames:
fn = os.path.join(setup_path, f) # joining the filenames with their path
list_file.append(fn)
jarfiles = ",".join(list_file) # joining all the filenames in a string
config.set('arguments', i, jarfiles) # saving the new updated list of files
elif i == 'keytab': # if the field is keytab
kt_file = []
filenames = request.form.getlist('kt_check')
for f in filenames:
fn = os.path.join(keytab_path, f) # joining the filenames with their path
kt_file.append(fn)
ktfiles = ",".join(kt_file) # joining all the filenames in a string
config.set('arguments', i, ktfiles) # saving the new updated list of files
else:
py_list_file = []
py_filenames = request.form.getlist('py_check')
for f in py_filenames:
fn = os.path.join(setup_path, f) # joining the filenames with their path
py_list_file.append(fn)
pythonfiles = ",".join(py_list_file) # joining all the filenames in a string
config.set('arguments', i, pythonfiles) # saving the new updated list of files
for j in configs:
# print("printing j", j, request.form[j])
config.set('configurations', j, request.form[j]) # saving the new updated fields
# filtering out new keys:
for key in request.form:
if key.startswith('new-arg-key') and request.form[key]:
# adding new fields in config['arguments']
key_no = key.split('-')[-1]
config.set('arguments', request.form[key], request.form['new-arg-value-' + key_no])
elif key.startswith('new-config-key') and request.form[key]:
# adding new fields in config['configurations']
key_no = key.split('-')[-1]
config.set('configurations', request.form[key],
request.form['new-config-value-' + key_no])
try:
# if there is option in the file, then delete
if config.has_option('arguments', request.form['option_title_args_delete']):
# deleting from the config file
config.remove_option('arguments', request.form['option_title_args_delete'])
except Exception:
print("Sorry ! No field found in delete in args")
try:
# if there is option in the file, then delete
if config.has_option('configurations', request.form['option_title_config_delete']):
# deleting from the config file
config.remove_option('configurations', request.form['option_title_config_delete'])
except Exception:
print("Sorry ! No field found in delete in config")
# writing all the changes to the file
with open(conf_path, 'w') as configfile:
config.write(configfile)
new_args = collections.OrderedDict(config.items('arguments'))
new_config = collections.OrderedDict(config.items('configurations'))
len_jar = len(files)
len_py = len(py_files)
kt_len = len(kt_files)
return self.render_template(
'airflow/couture_config.html',
title=title,
Arguments=new_args,
Configurations=new_config,
Files=files,
Py_Files=py_files,
len_jar=len_jar,
len_py=len_py,
kt_len=kt_len,
kt_Files=kt_files,
group=group
)
else:
len_jar = len(files)
len_py = len(py_files)
kt_len = len(kt_files)
return self.render_template(
'airflow/couture_config.html',
title=title,
len=len(args),
Arguments=args,
Configurations=configs,
Files=files, Py_Files=py_files,
len_jar=len_jar, len_py=len_py,
kt_len=kt_len,
group=group,
kt_Files=kt_files)
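# Illustrative shape of <group>/couture-spark.conf edited above (an INI file
# with two sections; the values below are placeholders, not shipped defaults):
#
# [arguments]
# jars = /path/to/jars/a.jar,/path/to/jars/b.jar
# py-files = /path/to/jars/helpers.zip
# keytab = /path/to/keytab/user.keytab
# principal = user@REALM
#
# [configurations]
# spark.executor.memory = 4g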
class LdapConfView(AirflowBaseView):
default_view = 'update_ldap_conf'
@expose('/ldap', methods=['GET', 'POST'])
@has_access
@action_logging
def update_ldap_conf(self):
import collections
import configparser as CP
from airflow.configuration import AIRFLOW_HOME
config = CP.ConfigParser()
config.optionxform = str
conf_path = os.path.normpath(os.path.join(AIRFLOW_HOME, *[os.pardir,
'configs',
'ldap.conf']))
title = "Ldap Configuration"
if request.method == 'POST':
config.read(filenames=conf_path)
args = collections.OrderedDict(config.items('ldap'))
for i in args:
config.set('ldap', i, request.form[i])
with open(conf_path, 'w') as configfile:
config.write(configfile)
new_args = collections.OrderedDict(config.items('ldap'))
return self.render_template(
'airflow/ldap.html', title=title, Arguments=new_args)
else:
try:
config.read(filenames=conf_path)
args = collections.OrderedDict(config.items('ldap'))
return self.render_template(
'airflow/ldap.html', title=title, Arguments=args)
except CP.NoSectionError:
return self.render_template(
'airflow/ldap.html', title=title, error='No LDAP Config Found')
except Exception:
error = '''Error while parsing LDAP conf file. Please check the
file and try again.'''
return self.render_template(
'airflow/ldap.html', title=title, error=error)
class HelpView(AirflowBaseView):
default_view = 'help'
method_permission_name = {
'help': 'show',
}
@expose('/help')
@has_access
def help(self):
try:
return redirect(url_for('static', filename='Couture_AI_Workflow_Orchestrator.pdf'))
except Exception as e:
return str(e)
# @expose('/load_help')
# @has_access
# def file_downloads(self):
# try:
# return self.render_template('airflow/help.html')
# except Exception as e:
# return str(e)
class KeyTabView(AirflowBaseView):
default_view = 'update_keytab'
@expose('/keytab/<string:group>', methods=['GET', 'POST'])
@has_access
@action_logging
def update_keytab(self, group):
# NOTE: refactor this method.
title = "Kerberos Configuration"
add_to_dir = os.path.join(settings.HADOOP_CONFIGS_FOLDER, *[group, 'keytab'])
os.makedirs(add_to_dir, exist_ok=True)
file_name = os.path.join(add_to_dir, 'keytab.conf')
principal = ''
keytab_files = ''
import configparser as CP
import collections
config = CP.ConfigParser()
config.optionxform = str
if os.path.exists(file_name):
config.read(filenames=file_name)
if 'arguments' not in config.sections():
# Set default values here
config.add_section('arguments')
config.set('arguments', 'principal', principal)
config.set('arguments', 'keytab', keytab_files)
with open(file_name, 'w') as f:
config.write(f)
config.read(filenames=file_name)
arguments = collections.OrderedDict(config.items('arguments'))
args = arguments
if request.method == 'POST':
all_files = []
for r, d, f in os.walk(add_to_dir):
for file in f:
if file.endswith(".keytab"):
all_files.append(file)
for i in arguments:
if i == 'principal':
config.set('arguments', i, request.form[i])
principal = request.form[i]
elif i != 'keytab':
config.set('arguments', i, request.form[i])
elif i == 'keytab':
list_file = []
filenames = request.form.getlist('check')
for f in filenames:
fn = os.path.join(add_to_dir, f)
list_file.append(fn)
keytab_files = ",".join(list_file)
config.set('arguments', i, keytab_files)
try:
if config.has_option('arguments', request.form[
'option_title_args_delete']): # if there is option in the file, then delete
# deleting from the config file
config.remove_option('arguments',
request.form['option_title_args_delete'])
else:
print("no such field exists in args now.")
except Exception:
print("Sorry ! No field found in delete in args")
try: # for deleting the keytab files from the folder
del_filename = request.form['option_title_delete_Artifact']
file_data = {}
for r, d, f in os.walk(add_to_dir):
for file_name in f:
if file_name == del_filename:
os.remove(os.path.join(add_to_dir, file_name))
AirflowBaseView.audit_logging(
"keytab_deleted", file_name, request.environ['REMOTE_ADDR'])
flash('File Deleted!!', "warning")
else:
filePath = os.path.join(add_to_dir, file_name)
if os.path.exists(filePath) and filePath.endswith(".keytab"):
fileStatsObj = os.stat(filePath)
modificationTime = time.ctime(fileStatsObj[stat.ST_MTIME])
size = os.stat(filePath).st_size
size = AirflowBaseView.convert_size(size)
temp_dict = {'time': modificationTime.split(' ', 1)[1], 'size': size}
file_data[file_name] = temp_dict
len_keytab = len(file_data)
return redirect(url_for('KeyTabView.update_keytab', group=group))
except Exception:
print("Sorry ! No file to delete")
target = os.path.join(add_to_dir)
os.makedirs(target, exist_ok=True)
try:
for f in request.files.getlist("file"): # for saving a file
filename = f.filename
# if filename.endswith(".keytab"):
destination = os.path.join(target, filename)
f.save(destination)
AirflowBaseView.audit_logging("keytab_added", filename, request.environ['REMOTE_ADDR'])
flash('File Uploaded!!',
"success")
except Exception:
print("No file selected!")
# listing the uploaded .keytab files via get_details
file_data = self.get_details(add_to_dir, ".keytab")
len_keytab = len(file_data)
with open(file_name, 'w') as configfile:
config.write(configfile)
conf_path = os.path.join(settings.HADOOP_CONFIGS_FOLDER, *[group, 'couture-spark.conf'])
if os.path.exists(conf_path):
config.read(filenames=conf_path)
else:
config.add_section('arguments')
config.set('arguments', 'principal', principal)
config.set('arguments', 'keytab', keytab_files)
with open(conf_path, 'w') as configfile:
config.write(configfile)
return redirect(url_for('KeyTabView.update_keytab', group=group))
else:
file_data = self.get_details(add_to_dir, ".keytab")
len_keytab = len(file_data)
all_files = []
for r, d, f in os.walk(add_to_dir):
for file in f:
if file.endswith(".keytab"):
all_files.append(file)
return self.render_template('airflow/keytab.html',
title=title,
group=group,
file_data=file_data,
Arguments=args,
len_keytab=len_keytab,
Files=all_files)
@expose("/keytab_download/<string:group>/<string:filename>", methods=['GET', 'POST'])
@has_access
def download(self, group, filename): # for downloading the file passed in the filename
add_to_dir = os.path.join(settings.HADOOP_CONFIGS_FOLDER, *[group])
path_file = os.path.join(add_to_dir, filename)
AirflowBaseView.audit_logging(
'KeyTabView.download_keytab',
f'{group}-{filename}',
request.environ['REMOTE_ADDR'],
)
return send_file(path_file, as_attachment=True, conditional=True)
class JupyterNotebookView(GitIntegrationMixin, AirflowBaseView):
default_view = 'jupyter_notebook'
fs_path = settings.JUPYTER_HOME
class_permission_name = 'Trained Models'
method_permission_name = {
'jupyter_notebook': 'access',
'jupyter_git_status': 'access',
'jupyter_git_logs': 'access',
'push_view': 'access',
'pull_view': 'access',
'run_notebook': 'access'
}
# add this to global in GitIntegrationMixin
config_section = 'JupyterNotebook'
def guess_type(self, val):
try:
val = ast.literal_eval(val)
except ValueError:
pass
return val
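# Examples of the type guessing above (illustrative):
#   guess_type('42')        -> 42
#   guess_type('[1, 2, 3]') -> [1, 2, 3]
#   guess_type('hello')     -> 'hello'   (ValueError is swallowed, value kept as str)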
def create_jupyter_dag(self, notebook, parameters, schedule=None):
dag_id = "-".join(["JupyterNotebookExceution",
Path(notebook).resolve().stem,
datetime.now().strftime("%d-%m-%Y-%H-%M-%S")])
username = g.user.username
now = datetime.now()
code = self.render_template('dags/default_jupyter_dag.jinja2',
notebook=notebook,
username=username,
parameters=parameters,
dag_id=dag_id,
now=now,
schedule=schedule)
with open(os.path.join(settings.DAGS_FOLDER, dag_id + '.py'), 'w') as dag_file:
dag_file.write(code)
AirflowBaseView.audit_logging(
'JupyterNotebook.create_jupyter_dag',
notebook,
request.environ['REMOTE_ADDR'],
)
return dag_id
@expose('/jupyter_notebook')
@has_access
@action_logging
def jupyter_notebook(self):
title = "Jupyter Notebook"
notebooks = self.get_details(settings.JUPYTER_HOME, '.ipynb')
current_status, logs = self.get_status()
return self.render_template('airflow/jupyter_notebook.html',
title=title,
# TODO: Load modal in template using ajax
git_template=self.get_git_template(),
view=self.__class__.__name__,
current_status=current_status,
logs=logs,
notebooks=notebooks)
@expose('/jupyter/status')
@has_access
@action_logging
def jupyter_git_status(self):
current_status = self.git_status()
return self.render_template('gitintegration/status_modal.html',
view=self.__class__.__name__,
current_status=current_status)
@expose('/jupyter/logs')
@has_access
@action_logging
def jupyter_git_logs(self):
logs = self.git_logs("--pretty=%C(auto)%h %s, Author=<%aN>, Date=%ai")
return self.render_template('gitintegration/logs_modal.html',
view=self.__class__.__name__,
logs=logs)
@expose('/jupyter/commit/', methods=['POST'])
@has_access
@action_logging
def commit_view(self):
# TODO: Move this to GitIntegrationMixin
form = request.form
files_to_commit = []
commit_file_prefix = 'commit-file-'
for key, val in form.items():
if key.startswith(commit_file_prefix) and val:
files_to_commit.append(key[len(commit_file_prefix):])
# print(files_to_commit)
self.git_add(files_to_commit)
self.git_commit(form['commit-msg'], g.user)
# TODO: Refine flash message
flash('Committed files successfully')
return redirect(url_for('JupyterNotebookView.jupyter_notebook'))
@expose('/jupyter/push/', methods=['GET', 'POST'])
@has_access
@action_logging
def push_view(self):
# TODO: Move this to GitIntegrationMixin
push_status = self.git_push()
if push_status:
flash('Pushed successfully!')
else:
flash('Unknown error while pushing')
return redirect(url_for('JupyterNotebookView.jupyter_notebook'))
@expose('/jupyter/pull/', methods=['GET', 'POST'])
@has_access
@action_logging
def pull_view(self):
# TODO: Move this to GitIntegrationMixin
pull_status = self.git_pull()
if pull_status:
flash('Pulled successfully!')
else:
flash('Unknown error while pulling')
return redirect(url_for('JupyterNotebookView.jupyter_notebook'))
@expose('/jupyter/run-notebook/', methods=['POST'])
@has_access
@action_logging
def run_notebook(self):
notebook = request.form.get('notebook', None)
if not notebook:
return make_response(('Missing notebook.', 500))
parameters = {}
for key, val in request.form.items():
if key.startswith('param-key-'):
key_no = str(key.split('-')[-1])
parameters[val] = self.guess_type(request.form.get('param-value-' + key_no, ''))
schedule = request.form.get('schedule')
if schedule:
try:
croniter(schedule)
except (CroniterBadCronError,
CroniterBadDateError,
CroniterNotAlphaError):
flash('Bad Cron Schedule', category='error')
return redirect(url_for('JupyterNotebookView.jupyter_notebook'))
else:
schedule = '@once'
dag_id = self.create_jupyter_dag(notebook, parameters, schedule=schedule)
flash('Your notebook was scheduled as {}, it should be reflected shortly.'.format(dag_id))
_parse_dags(update_DagModel=True)
unpause_dag(dag_id)
# flash('If dags are paused on default, unpause the created dag.', category='info')
return redirect(url_for('Airflow.graph', dag_id=dag_id))
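# Illustrative mapping performed above: submitting the form fields
#   param-key-1=alpha, param-value-1=0.5, param-key-2=epochs, param-value-2=10
# yields
#   parameters == {'alpha': 0.5, 'epochs': 10}
# (values pass through guess_type, so numeric strings become numbers) before
# the notebook DAG is rendered and unpaused.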
class GitConfigView(GitIntegrationMixin, AirflowBaseView):
default_view = 'git_config_view'
class_permission_name = 'Git Configuration'
method_permission_name = {
'git_config_view': 'access'
}
fs_path = None
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@expose('/git-configs', methods=['GET', 'POST'])
@has_access
@action_logging
def git_config_view(self):
config = self.read_config()
if request.method == 'GET':
return self.render_template('gitintegration/git_config_view.html',
title='Git Configuration View',
sections=self.get_sections(),
keys=self.get_keys(),
config=config)
form = request.form
section = form.get('section')
if not section or section not in self.get_sections():
return redirect(url_for('GitConfigView.git_config_view'))
if not config.has_section(section):
config[section] = {}
for key in form.keys():
if key.startswith('config-'):
cleaned_key = key.split('-')[-1]
config[section][cleaned_key] = form[key]
self.write_config(config)
# TODO: Refine flash message.
flash('Git config for {} set successfully!'.format(section))
return redirect(url_for('GitConfigView.git_config_view'))
class LivyConfigView(AirflowBaseView):
default_view = 'livy_config_view'
base_fs_path = Path(settings.LIVY_CONF_PATH)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Path(self.fs_path).parent.mkdir(exist_ok=True)
sections = [
'kernel_python_credentials',
'kernel_scala_credentials',
'kernel_r_credentials',
]
keys = {
'username': {
'type': 'text',
},
'password': {
'type': 'text'
},
'url': {
'type': 'text',
},
'auth': {
'type': 'select',
'options': ['None', 'Kerberos', 'Basic_Access']
},
}
def get_sections(self):
return self.sections
def get_keys(self):
return self.keys
def read_config(self, fs_path):
try:
with open(fs_path) as f:
return json.load(f)
except (FileNotFoundError, json.decoder.JSONDecodeError,):
default = {}
for section in self.get_sections():
default[section] = {}
return default
def write_config(self, config, fs_path):
# create .sparkmagic dir.
Path(fs_path).parent.mkdir(exist_ok=True)
with open(fs_path, 'w') as f:
json.dump(config, f)
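# Illustrative shape of the sparkmagic config.json written above (values are
# placeholders; the real Livy URL and auth mode depend on the deployment):
# {
#   "kernel_python_credentials": {"username": "", "password": "",
#                                 "url": "http://livy:8998", "auth": "None"},
#   "kernel_scala_credentials": {"...": "..."},
#   "kernel_r_credentials": {"...": "..."}
# }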
@expose('/livy-configs/<string:group>', methods=['GET', 'POST'])
@has_access
@action_logging
def livy_config_view(self, group):
fs_path = os.path.join(self.base_fs_path, *[group, '.sparkmagic', 'config.json'])
config = self.read_config(fs_path)
if request.method == 'GET':
return self.render_template('livyconfig/livy_config_view.html',
title='Livy Configuration View',
config=self.read_config(fs_path),
keys=self.get_keys(),
sections=self.get_sections())
form = request.form
section = form.get('section')
if not section or section not in self.get_sections():
return redirect(url_for('LivyConfigView.livy_config_view'))
for key in form.keys():
if key.startswith('config-'):
cleaned_key = key.split('-')[-1]
config[section][cleaned_key] = form[key]
self.write_config(config, fs_path)
AirflowBaseView.audit_logging(
'LivyConfView.livy_config_view',
'',
request.environ['REMOTE_ADDR'],
)
# TODO: Refine flash message.
flash('Livy config for config group {}: {} set successfully!'.format(group, section))
return redirect(url_for('LivyConfigView.livy_config_view', group=group))
class AddDagView(AirflowBaseView):
default_view = 'add_dag'
class_permission_name = "Manage DAG"
method_permission_name = {
"add_dag": "access",
"editdag": "access",
"save_snippet": "access",
"download": "access",
}
# TODO: Refactor this to use FileUploadBaseView.
# TODO: Refactor Codebricks into its own view.
# regex for validating filenames while adding new ones
regex_valid_filenames = re.compile('^[A-Za-z0-9_@()-]+$')
regex_valid_snippetnames = re.compile('^[\sA-Za-z0-9_@()-]+$') # noqa
template_dag_file_path = os.path.join(
app.root_path, *['..', 'config_templates', 'default_dag_template.py'])
dag_file_template = ''
try:
with open(template_dag_file_path, 'r') as f:
dag_file_template = f.read()
except Exception:
pass
def __init__(self, *args, **kwargs):
os.makedirs(settings.DAGS_FOLDER, exist_ok=True)
super().__init__(*args, **kwargs)
def get_dag_file_path(self, filename):
# a dag id is sent instead of fileloc.
if not filename.endswith('.py'):
dag_orm = DagModel.get_dagmodel(filename)
return os.path.join(dag_orm.fileloc)
return os.path.join(settings.DAGS_FOLDER, filename)
def get_snippet_metadata_path(self):
return os.path.join(AIRFLOW_HOME, *['repo', 'dag-snippets.json'])
def get_snippet_file_path(self, title):
filename = self.snippet_title_to_file(title)
return os.path.join(AIRFLOW_HOME, *['repo', filename])
def get_snippets_metadata(self):
snippets_path = self.get_snippet_metadata_path()
with open(snippets_path) as f:
return json.load(f)
def get_snippets(self):
snippets_path = self.get_snippet_metadata_path()
if Path(snippets_path).exists():
metadata = self.get_snippets_metadata()
# print(metadata)
for title in metadata.keys():
try:
with open(self.get_snippet_file_path(title)) as codefile:
snippet = codefile.read()
except Exception:
# print(e)
snippet = ''
metadata[title] = {
'description': metadata[title],
'snippet': snippet
}
return metadata
return dict()
def snippet_title_to_file(self, title):
return title.replace(' ', '_') + '.py'
def save_snippets(self, metadata, new_snippet):
"""Save a new snippet in the repo
Arguments:
metadata {dict} -- with keys `title` and `description` of new snippet
new_snippet {str} -- code of new snippet.
"""
snippets = self.get_snippets_metadata()
snippets[metadata['title']] = metadata['description']
snippets_path = self.get_snippet_metadata_path()
with open(snippets_path, 'w') as f:
json.dump(snippets, f)
with open(self.get_snippet_file_path(metadata['title']), 'w') as f:
f.write(new_snippet)
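# Illustrative state after saving a snippet titled "Spark Submit": the metadata
# file repo/dag-snippets.json maps titles to descriptions, e.g.
#   {"Spark Submit": "Boilerplate for a spark-submit task"}
# and the snippet code itself is written to repo/Spark_Submit.py.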
@expose('/add_dag', methods=['GET', 'POST'])
@action_logging
@has_access
def add_dag(self):
"""This view adds or removes DAG files.
"""
# TODO: Refactor this code further.
# TODO: Change name of this view.
title = "Add DAG"
dags_dir = settings.DAGS_FOLDER
if request.method == 'GET':
del_filename = request.args.get('delete')
if del_filename:
# This will only scan the current directory. If we want to
# scan sub directories, we use `os.walk` instead.
for file_name in os.listdir(dags_dir):
if file_name.endswith(".py") and file_name == del_filename:
os.remove(self.get_dag_file_path(file_name))
AirflowBaseView.audit_logging('dag_deleted',
file_name,
request.environ['REMOTE_ADDR'])
# Thread to update DAGBAG.
threading.Thread(target=_parse_dags, args=(True,)).start()
flash('File ' + file_name + ' Deleted!!', "warning")
break
# The below redirect makes sure that the ?delete=<filename>
# is removed from the GET request, as we are redirecting user
# with 0 GET args. This will prevent any accidental deletion of file.
return redirect(url_for('AddDagView.add_dag'))
elif request.method == 'POST':
list_files = request.files.getlist("file")
# check if a new filename has been sent to be created.
filename = request.form.get('filename')
if len(list_files) > 0:
files_uploaded = 0
for upload in request.files.getlist("file"):
filename = upload.filename
if self.regex_valid_filenames.match(Path(filename).resolve().stem):
destination = self.get_dag_file_path(filename)
upload.save(destination)
AirflowBaseView.audit_logging('dag_added',
filename,
request.environ['REMOTE_ADDR'])
files_uploaded += 1
elif filename:
flash('Only python files allowed !, ' + filename + ' not allowed', 'error')
flash(str(files_uploaded) + ' files uploaded!!', 'success')
_parse_dags(update_DagModel=True)
elif filename:
if self.regex_valid_filenames.match(filename):
filename = '.'.join([filename, 'py'])
if Path(self.get_dag_file_path(filename)).exists():
    flash('DAG {} already present.'.format(filename))
else:
insert_starter_content = request.form.get('insert-template-content')
return redirect(url_for('AddDagView.editdag', filename=filename, insert_starter_content=insert_starter_content, new=True))
else:
flash('Invalid DAG name, DAG not created.', 'error')
# the below redirect is to avoid form resubmission messages when
# we refresh the page in the browser.
return redirect(url_for('AddDagView.add_dag'))
file_data = self.get_details(dags_dir, ".py")
return self.render_template('airflow/add_dag.html', title=title, file_data=file_data)
@expose("/editdag/<string:filename>/", methods=['GET', 'POST'])
@action_logging
@has_access
def editdag(self, filename):
# NOTE: filename can be the name of a dag file or a dag_id as well.
fullpath = self.get_dag_file_path(filename)
new = request.args.get('new', False)
if not (new or Path(fullpath).exists()) and \
self.regex_valid_filenames.match(Path(filename).resolve().stem):
return make_response(('DAG not found', 404))
if request.method == 'POST':
code = request.form['code']
with open(fullpath, 'w') as code_file:
code_file.write(code)
flash('Successfully saved !')
AirflowBaseView.audit_logging(
"{}.{}".format(self.__class__.__name__, 'editdag'),
filename, request.environ['REMOTE_ADDR'])
if new:
_parse_dags(update_DagModel=True)
# TODO Unpause dag here.
# unpause_dag(dag_id)
return redirect(url_for('AddDagView.editdag', filename=filename))
else:
if new:
insert_starter_content = request.args.get('insert_starter_content')
if insert_starter_content:
code = self.dag_file_template.replace("CoutureExample", os.path.splitext(filename)[0], 1)
else:
code = ""
else:
with open(fullpath, 'r') as code_file:
code = code_file.read()
return self.render_template("airflow/editdag.html",
code=code,
filename=filename,
language_server_url=settings.LANGUAGE_SERVER_URL,
dags_folder_path=settings.DAGS_FOLDER,
new=new,
snippets=self.get_snippets())
@expose("/save_snippet/<string:filename>", methods=['POST'])
@has_access
@action_logging
def save_snippet(self, filename):
snippet_file_path = self.get_snippet_metadata_path()
# creating a path to tasks_folder (works for python >= 3.5)
Path(snippet_file_path).parent.mkdir(parents=True, exist_ok=True)
if request.method == 'POST':
# snippets = self.get_snippets()
metadata = {
'title': request.form['title'],
'description': request.form['description']
}
new_snippet = request.form['snippet']
self.save_snippets(metadata, new_snippet)
# with open(snippet_file_path, 'w') as f:
# json.dump(snippets, f)
return redirect(url_for('AddDagView.editdag', filename=filename))
return make_response(('METHOD_NOT_ALLOWED', 403))
@expose("/dag_download/<string:filename>", methods=['GET', 'POST'])
@has_access
@action_logging
def download(self, filename):
path_file = self.get_dag_file_path(filename)
return send_file(path_file, as_attachment=True)
######################################################################################
# ModelViews
######################################################################################
class DagFilter(BaseFilter):
def apply(self, query, func): # noqa
if appbuilder.sm.has_all_dags_access():
return query
filter_dag_ids = appbuilder.sm.get_accessible_dag_ids()
return query.filter(self.model.dag_id.in_(filter_dag_ids))
class AirflowModelView(ModelView):
list_widget = AirflowModelListWidget
page_size = PAGE_SIZE
CustomSQLAInterface = wwwutils.CustomSQLAInterface
class SlaMissModelView(AirflowModelView):
route_base = '/slamiss'
datamodel = AirflowModelView.CustomSQLAInterface(SlaMiss)
base_permissions = ['can_list']
list_columns = ['dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp']
add_columns = ['dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp']
edit_columns = ['dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp']
search_columns = ['dag_id', 'task_id', 'email_sent', 'timestamp', 'execution_date']
base_order = ('execution_date', 'desc')
base_filters = [['dag_id', DagFilter, lambda: []]]
formatters_columns = {
'task_id': wwwutils.task_instance_link,
'execution_date': wwwutils.datetime_f('execution_date'),
'timestamp': wwwutils.datetime_f('timestamp'),
'dag_id': wwwutils.dag_link,
}
class XComModelView(AirflowModelView):
route_base = '/xcom'
datamodel = AirflowModelView.CustomSQLAInterface(XCom)
base_permissions = ['can_add', 'can_list', 'can_edit', 'can_delete']
search_columns = ['key', 'value', 'timestamp', 'execution_date', 'task_id', 'dag_id']
list_columns = ['key', 'value', 'timestamp', 'execution_date', 'task_id', 'dag_id']
add_columns = ['key', 'value', 'execution_date', 'task_id', 'dag_id']
edit_columns = ['key', 'value', 'execution_date', 'task_id', 'dag_id']
base_order = ('execution_date', 'desc')
base_filters = [['dag_id', DagFilter, lambda: []]]
formatters_columns = {
'task_id': wwwutils.task_instance_link,
'execution_date': wwwutils.datetime_f('execution_date'),
'timestamp': wwwutils.datetime_f('timestamp'),
'dag_id': wwwutils.dag_link,
}
@action('muldelete', 'Delete', "Are you sure you want to delete selected records?",
single=False)
def action_muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
def pre_add(self, item):
item.execution_date = timezone.make_aware(item.execution_date)
item.value = XCom.serialize_value(item.value)
def pre_update(self, item):
item.execution_date = timezone.make_aware(item.execution_date)
item.value = XCom.serialize_value(item.value)
class ConnectionModelView(AirflowModelView):
route_base = '/connection'
datamodel = AirflowModelView.CustomSQLAInterface(Connection)
base_permissions = ['can_add', 'can_list', 'can_edit', 'can_delete']
extra_fields = ['extra__jdbc__drv_path', 'extra__jdbc__drv_clsname',
'extra__google_cloud_platform__project',
'extra__google_cloud_platform__key_path',
'extra__google_cloud_platform__keyfile_dict',
'extra__google_cloud_platform__scope',
'extra__google_cloud_platform__num_retries',
'extra__grpc__auth_type',
'extra__grpc__credential_pem_file',
'extra__grpc__scopes']
list_columns = ['conn_id', 'conn_type', 'host', 'port', 'is_encrypted',
'is_extra_encrypted']
add_columns = edit_columns = ['conn_id', 'conn_type', 'host', 'schema',
'login', 'password', 'port', 'extra'] + extra_fields
add_form = edit_form = ConnectionForm
add_template = 'airflow/conn_create.html'
edit_template = 'airflow/conn_edit.html'
base_order = ('conn_id', 'asc')
@action('muldelete', 'Delete', 'Are you sure you want to delete selected records?',
single=False)
@has_dag_access(can_dag_edit=True)
def action_muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
def process_form(self, form, is_created):
formdata = form.data
if formdata['conn_type'] in ['jdbc', 'google_cloud_platform', 'grpc']:
extra = {
key: formdata[key]
for key in self.extra_fields if key in formdata}
form.extra.data = json.dumps(extra)
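# Illustrative 'extra' JSON produced above for a JDBC connection (only the
# extra_fields present in the submitted form are included; paths are placeholders):
#   {"extra__jdbc__drv_path": "/opt/drivers/postgresql.jar",
#    "extra__jdbc__drv_clsname": "org.postgresql.Driver"}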
def prefill_form(self, form, pk):
try:
d = json.loads(form.data.get('extra', '{}'))
except Exception:
d = {}
if not hasattr(d, 'get'):
logging.warning('extra field for {} is not iterable'.format(
form.data.get('conn_id', '<unknown>')))
return
for field in self.extra_fields:
value = d.get(field, '')
if value:
field = getattr(form, field)
field.data = value
class PoolModelView(AirflowModelView):
route_base = '/pool'
datamodel = AirflowModelView.CustomSQLAInterface(models.Pool)
base_permissions = ['can_add', 'can_list', 'can_edit', 'can_delete']
list_columns = ['pool', 'slots', 'used_slots', 'queued_slots']
add_columns = ['pool', 'slots', 'description']
edit_columns = ['pool', 'slots', 'description']
base_order = ('pool', 'asc')
@action('muldelete', 'Delete', 'Are you sure you want to delete selected records?',
single=False)
def action_muldelete(self, items):
if any(item.pool == models.Pool.DEFAULT_POOL_NAME for item in items):
flash("default_pool cannot be deleted", 'error')
self.update_redirect()
return redirect(self.get_redirect())
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
def pool_link(attr):
pool_id = attr.get('pool')
if pool_id is not None:
url = url_for('TaskInstanceModelView.list', _flt_3_pool=pool_id)
return Markup("<a href='{url}'>{pool_id}</a>").format(url=url, pool_id=pool_id)
else:
return Markup('<span class="label label-danger">Invalid</span>')
def fused_slots(attr):
pool_id = attr.get('pool')
used_slots = attr.get('used_slots')
if pool_id is not None and used_slots is not None:
url = url_for('TaskInstanceModelView.list', _flt_3_pool=pool_id, _flt_3_state='running')
return Markup("<a href='{url}'>{used_slots}</a>").format(url=url, used_slots=used_slots)
else:
return Markup('<span class="label label-danger">Invalid</span>')
def fqueued_slots(attr):
pool_id = attr.get('pool')
queued_slots = attr.get('queued_slots')
if pool_id is not None and queued_slots is not None:
url = url_for('TaskInstanceModelView.list', _flt_3_pool=pool_id, _flt_3_state='queued')
return Markup("<a href='{url}'>{queued_slots}</a>").format(url=url, queued_slots=queued_slots)
else:
return Markup('<span class="label label-danger">Invalid</span>')
formatters_columns = {
'pool': pool_link,
'used_slots': fused_slots,
'queued_slots': fqueued_slots
}
validators_columns = {
'pool': [validators.DataRequired()],
'slots': [validators.NumberRange(min=0)]
}
class VariableModelView(AirflowModelView):
route_base = '/variable'
list_template = 'airflow/variable_list.html'
edit_template = 'airflow/variable_edit.html'
datamodel = AirflowModelView.CustomSQLAInterface(models.Variable)
base_permissions = ['can_add', 'can_list', 'can_edit', 'can_delete', 'can_varimport']
list_columns = ['key', 'val', 'is_encrypted']
add_columns = ['key', 'val']
edit_columns = ['key', 'val']
search_columns = ['key', 'val']
base_order = ('key', 'asc')
def hidden_field_formatter(attr):
key = attr.get('key')
val = attr.get('val')
if wwwutils.should_hide_value_for_key(key):
return Markup('*' * 8)
if val:
return val
else:
return Markup('<span class="label label-danger">Invalid</span>')
formatters_columns = {
'val': hidden_field_formatter,
}
validators_columns = {
'key': [validators.DataRequired()]
}
def prefill_form(self, form, id):
if wwwutils.should_hide_value_for_key(form.key.data):
form.val.data = '*' * 8
@action('muldelete', 'Delete', 'Are you sure you want to delete selected records?',
single=False)
def action_muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
@action('varexport', 'Export', '', single=False)
def action_varexport(self, items):
var_dict = {}
d = json.JSONDecoder()
for var in items:
try:
val = d.decode(var.val)
except Exception:
val = var.val
var_dict[var.key] = val
response = make_response(json.dumps(var_dict, sort_keys=True, indent=4))
response.headers["Content-Disposition"] = "attachment; filename=variables.json"
response.headers["Content-Type"] = "application/json; charset=utf-8"
return response
@expose('/varimport', methods=["POST"])
@has_access
@action_logging
def varimport(self):
try:
out = request.files['file'].read()
if not PY2 and isinstance(out, bytes):
d = json.loads(out.decode('utf-8'))
else:
d = json.loads(out)
except Exception:
self.update_redirect()
flash("Missing file or syntax error.", 'error')
return redirect(self.get_redirect())
else:
suc_count = fail_count = 0
for k, v in d.items():
try:
models.Variable.set(k, v, serialize_json=isinstance(v, dict))
except Exception as e:
logging.info('Variable import failed: {}'.format(repr(e)))
fail_count += 1
else:
suc_count += 1
flash("{} variable(s) successfully updated.".format(suc_count))
if fail_count:
flash("{} variable(s) failed to be updated.".format(fail_count), 'error')
self.update_redirect()
return redirect(self.get_redirect())
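# Illustrative variables.json accepted by varimport above (plain values are
# stored as-is; dict values are stored with serialize_json=True):
#   {"env": "staging", "spark_defaults": {"executor_memory": "4g"}}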
class JobModelView(AirflowModelView):
route_base = '/job'
datamodel = AirflowModelView.CustomSQLAInterface(jobs.BaseJob)
base_permissions = ['can_list']
list_columns = ['id', 'dag_id', 'state', 'job_type', 'start_date',
'end_date', 'latest_heartbeat',
'executor_class', 'hostname', 'unixname']
search_columns = ['id', 'dag_id', 'state', 'job_type', 'start_date',
'end_date', 'latest_heartbeat', 'executor_class',
'hostname', 'unixname']
base_order = ('start_date', 'desc')
base_filters = [['dag_id', DagFilter, lambda: []]]
formatters_columns = {
'start_date': wwwutils.datetime_f('start_date'),
'end_date': wwwutils.datetime_f('end_date'),
'hostname': wwwutils.nobr_f('hostname'),
'state': wwwutils.state_f,
'latest_heartbeat': wwwutils.datetime_f('latest_heartbeat'),
}
class DagRunModelView(AirflowModelView):
route_base = '/dagrun'
datamodel = AirflowModelView.CustomSQLAInterface(models.DagRun)
base_permissions = ['can_list', 'can_add']
add_columns = ['state', 'dag_id', 'execution_date', 'run_id', 'external_trigger', 'conf']
list_columns = ['state', 'dag_id', 'execution_date', 'run_id', 'external_trigger']
search_columns = ['state', 'dag_id', 'execution_date', 'run_id', 'external_trigger']
base_order = ('execution_date', 'desc')
base_filters = [['dag_id', DagFilter, lambda: []]]
add_form = edit_form = DagRunForm
formatters_columns = {
'execution_date': wwwutils.datetime_f('execution_date'),
'state': wwwutils.state_f,
'start_date': wwwutils.datetime_f('start_date'),
'dag_id': wwwutils.dag_link,
'run_id': wwwutils.dag_run_link,
}
@action('muldelete', "Delete", "Are you sure you want to delete selected records?",
single=False)
@has_dag_access(can_dag_edit=True)
@provide_session
def action_muldelete(self, items, session=None):
self.datamodel.delete_all(items)
self.update_redirect()
dirty_ids = []
for item in items:
dirty_ids.append(item.dag_id)
return redirect(self.get_redirect())
@action('set_running', "Set state to 'running'", '', single=False)
@provide_session
def action_set_running(self, drs, session=None):
try:
DR = models.DagRun
count = 0
dirty_ids = []
for dr in session.query(DR).filter(
DR.id.in_([dagrun.id for dagrun in drs])).all():
dirty_ids.append(dr.dag_id)
count += 1
dr.start_date = timezone.utcnow()
dr.state = State.RUNNING
session.commit()
flash("{count} dag runs were set to running".format(count=count))
except Exception as ex:
flash(str(ex), 'error')
flash('Failed to set state', 'error')
return redirect(self.get_default_url())
@action('set_failed', "Set state to 'failed'",
"All running task instances would also be marked as failed, are you sure?",
single=False)
@provide_session
def action_set_failed(self, drs, session=None):
try:
DR = models.DagRun
count = 0
dirty_ids = []
altered_tis = []
for dr in session.query(DR).filter(
DR.id.in_([dagrun.id for dagrun in drs])).all():
dirty_ids.append(dr.dag_id)
count += 1
altered_tis += \
set_dag_run_state_to_failed(dagbag.get_dag(dr.dag_id),
dr.execution_date,
commit=True,
session=session)
altered_ti_count = len(altered_tis)
flash(
"{count} dag runs and {altered_ti_count} task instances "
"were set to failed".format(count=count, altered_ti_count=altered_ti_count))
except Exception:
flash('Failed to set state', 'error')
return redirect(self.get_default_url())
@action('set_success', "Set state to 'success'",
"All task instances would also be marked as success, are you sure?",
single=False)
@provide_session
def action_set_success(self, drs, session=None):
try:
DR = models.DagRun
count = 0
dirty_ids = []
altered_tis = []
for dr in session.query(DR).filter(
DR.id.in_([dagrun.id for dagrun in drs])).all():
dirty_ids.append(dr.dag_id)
count += 1
altered_tis += \
set_dag_run_state_to_success(dagbag.get_dag(dr.dag_id),
dr.execution_date,
commit=True,
session=session)
altered_ti_count = len(altered_tis)
flash(
"{count} dag runs and {altered_ti_count} task instances "
"were set to success".format(count=count, altered_ti_count=altered_ti_count))
except Exception:
flash('Failed to set state', 'error')
return redirect(self.get_default_url())
class LogModelView(AirflowModelView):
route_base = '/log'
datamodel = AirflowModelView.CustomSQLAInterface(Log)
base_permissions = ['can_list']
list_columns = ['id', 'dttm', 'dag_id', 'task_id', 'event', 'execution_date',
'owner', 'extra', 'source_ip']
search_columns = ['dag_id', 'task_id', 'execution_date', 'extra']
base_order = ('dttm', 'desc')
base_filters = [['dag_id', DagFilter, lambda: []]]
formatters_columns = {
'dttm': wwwutils.datetime_f('dttm'),
'execution_date': wwwutils.datetime_f('execution_date'),
'dag_id': wwwutils.dag_link,
}
class TaskInstanceModelView(AirflowModelView):
route_base = '/taskinstance'
datamodel = AirflowModelView.CustomSQLAInterface(models.TaskInstance)
base_permissions = ['can_list']
page_size = PAGE_SIZE
list_columns = ['state', 'dag_id', 'task_id', 'execution_date', 'operator',
'start_date', 'end_date', 'duration', 'job_id', 'hostname',
'unixname', 'priority_weight', 'queue', 'queued_dttm', 'try_number',
'pool', 'log_url']
search_columns = ['state', 'dag_id', 'task_id', 'execution_date', 'hostname',
'queue', 'pool', 'operator', 'start_date', 'end_date']
base_order = ('job_id', 'asc')
base_filters = [['dag_id', DagFilter, lambda: []]]
def log_url_formatter(attr):
log_url = attr.get('log_url')
return Markup(
'<a href="{log_url}">'
' <span class="glyphicon glyphicon-book" aria-hidden="true">'
'</span></a>').format(log_url=log_url)
def duration_f(attr):
end_date = attr.get('end_date')
duration = attr.get('duration')
if end_date and duration:
return timedelta(seconds=duration)
formatters_columns = {
'log_url': log_url_formatter,
'task_id': wwwutils.task_instance_link,
'hostname': wwwutils.nobr_f('hostname'),
'state': wwwutils.state_f,
'execution_date': wwwutils.datetime_f('execution_date'),
'start_date': wwwutils.datetime_f('start_date'),
'end_date': wwwutils.datetime_f('end_date'),
'queued_dttm': wwwutils.datetime_f('queued_dttm'),
'dag_id': wwwutils.dag_link,
'duration': duration_f,
}
@provide_session
@action('clear', lazy_gettext('Clear'),
lazy_gettext('Are you sure you want to clear the state of the selected task'
' instance(s) and set their dagruns to the running state?'),
single=False)
def action_clear(self, tis, session=None):
try:
dag_to_tis = {}
for ti in tis:
dag = dagbag.get_dag(ti.dag_id)
                dag_to_tis.setdefault(dag, []).append(ti)
            for dag, dag_tis in dag_to_tis.items():
                models.clear_task_instances(dag_tis, session=session, dag=dag)
session.commit()
flash("{0} task instances have been cleared".format(len(tis)))
self.update_redirect()
return redirect(self.get_redirect())
except Exception:
flash('Failed to clear task instances', 'error')
@provide_session
def set_task_instance_state(self, tis, target_state, session=None):
try:
count = len(tis)
for ti in tis:
ti.set_state(target_state, session=session)
session.commit()
flash("{count} task instances were set to '{target_state}'".format(
count=count, target_state=target_state))
except Exception:
flash('Failed to set state', 'error')
@action('set_running', "Set state to 'running'", '', single=False)
@has_dag_access(can_dag_edit=True)
def action_set_running(self, tis):
self.set_task_instance_state(tis, State.RUNNING)
self.update_redirect()
return redirect(self.get_redirect())
@action('set_failed', "Set state to 'failed'", '', single=False)
@has_dag_access(can_dag_edit=True)
def action_set_failed(self, tis):
self.set_task_instance_state(tis, State.FAILED)
self.update_redirect()
return redirect(self.get_redirect())
@action('set_success', "Set state to 'success'", '', single=False)
@has_dag_access(can_dag_edit=True)
def action_set_success(self, tis):
self.set_task_instance_state(tis, State.SUCCESS)
self.update_redirect()
return redirect(self.get_redirect())
@action('set_retry', "Set state to 'up_for_retry'", '', single=False)
@has_dag_access(can_dag_edit=True)
def action_set_retry(self, tis):
self.set_task_instance_state(tis, State.UP_FOR_RETRY)
self.update_redirect()
return redirect(self.get_redirect())
def get_one(self, id):
"""
As a workaround for AIRFLOW-252, this method overrides Flask-Admin's
ModelView.get_one().
TODO: this method should be removed once the below bug is fixed on
Flask-Admin side. https://github.com/flask-admin/flask-admin/issues/1226
"""
task_id, dag_id, execution_date = iterdecode(id) # noqa
execution_date = pendulum.parse(execution_date)
return self.session.query(self.model).get((task_id, dag_id, execution_date))
class DagModelView(AirflowModelView):
route_base = '/dagmodel'
datamodel = AirflowModelView.CustomSQLAInterface(models.DagModel)
base_permissions = ['can_list', 'can_show']
list_columns = ['dag_id', 'is_paused', 'last_scheduler_run',
'last_expired', 'scheduler_lock', 'fileloc', 'owners']
formatters_columns = {
'dag_id': wwwutils.dag_link
}
base_filters = [['dag_id', DagFilter, lambda: []]]
def get_query(self):
"""
Default filters for model
"""
return (
super(DagModelView, self).get_query()
.filter(or_(models.DagModel.is_active,
models.DagModel.is_paused))
.filter(~models.DagModel.is_subdag)
)
def get_count_query(self):
"""
Default filters for model
"""
return (
super(DagModelView, self).get_count_query()
.filter(models.DagModel.is_active)
.filter(~models.DagModel.is_subdag)
)
@has_access
@permission_name("list")
@provide_session
@expose('/autocomplete')
def autocomplete(self, session=None):
query = unquote(request.args.get('query', ''))
if not query:
            return wwwutils.json_response([])
# Provide suggestions of dag_ids and owners
dag_ids_query = session.query(DagModel.dag_id.label('item')).filter(
~DagModel.is_subdag, DagModel.is_active,
DagModel.dag_id.ilike('%' + query + '%'))
owners_query = session.query(func.distinct(DagModel.owners).label('item')).filter(
~DagModel.is_subdag, DagModel.is_active,
DagModel.owners.ilike('%' + query + '%'))
# Hide paused dags
if request.args.get('showPaused', 'True').lower() == 'false':
dag_ids_query = dag_ids_query.filter(~DagModel.is_paused)
owners_query = owners_query.filter(~DagModel.is_paused)
filter_dag_ids = appbuilder.sm.get_accessible_dag_ids()
if 'all_dags' not in filter_dag_ids:
dag_ids_query = dag_ids_query.filter(DagModel.dag_id.in_(filter_dag_ids))
owners_query = owners_query.filter(DagModel.dag_id.in_(filter_dag_ids))
payload = [row[0] for row in dag_ids_query.union(owners_query).limit(10).all()]
return wwwutils.json_response(payload)
|
Run.py
|
'''This code is part of the 3D Scanner project.
Developed by team SAAS, Ekalavya 2017, IIT Bombay.'''
# The necessary packages are imported
import Main       # local package
import threading  # standard library
th1 = threading.Thread(target=Main.Main)  # Main() defined inside Main.py is run inside a thread
th1.start()  # The thread is started
'''PS:
The developers used threading to start processes from the very beginning of the program.
This was done so that any other task can run in parallel with the main process at any point during runtime.'''
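# Illustrative sketch (not part of the original project): because Main.Main runs
# inside th1, the main thread stays free for parallel work; a hypothetical helper
# that waits on the worker with a timeout could look like this.
def wait_for_main(worker, timeout_s=60.0):
    """Block for up to timeout_s seconds; return True if the worker has finished."""
    worker.join(timeout_s)          # wait for the thread, but never forever
    return not worker.is_alive()    # True once Main.Main has returned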
|
test_thd_distributed.py
|
from __future__ import absolute_import, division, print_function, unicode_literals
import copy
import fcntl
import multiprocessing
import os
import sys
import time
import unittest
from contextlib import contextmanager
from functools import reduce, wraps
import tempfile
import torch
import torch.cuda
import torch.distributed.deprecated as dist
import torch.nn as nn
import torch.nn.functional as F
from common_utils import TestCase, run_tests
from torch._utils_internal import TEST_MASTER_ADDR as MASTER_ADDR
BACKEND = os.environ["BACKEND"]
TEMP_DIR = os.environ["TEMP_DIR"]
INIT_METHOD = os.getenv("INIT_METHOD", "env://")
MASTER_PORT = "29500"
DEFAULT_TIMEOUT = 300
CUSTOMIZED_TIMEOUT = {"test_DistributedDataParallel": 500}
def get_timeout(test_id):
test_name = test_id.split(".")[-1]
if test_name in CUSTOMIZED_TIMEOUT:
return CUSTOMIZED_TIMEOUT[test_name]
else:
return DEFAULT_TIMEOUT
if not dist.is_available():
print("Distributed not available, skipping tests")
sys.exit(0)
SKIP_IF_NO_CUDA_EXIT_CODE = 75
SKIP_IF_NO_GPU_EXIT_CODE = 76
SKIP_IF_SMALL_WORLDSIZE_EXIT_CODE = 77
SKIP_IF_BACKEND_UNAVAILABLE = 78
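# These sentinel exit codes are returned via sys.exit() by the spawned worker
# processes and translated into unittest.SkipTest by
# TestDistBackend._join_and_reduce near the bottom of this file.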
def skip_if_no_cuda_distributed(func):
func.skip_if_no_cuda_distributed = True
@wraps(func)
def wrapper(*args, **kwargs):
if not torch.cuda.is_available():
sys.exit(SKIP_IF_NO_CUDA_EXIT_CODE)
return func(*args, **kwargs)
return wrapper
def skip_if_no_gpu(func):
""" Nccl multigpu tests requires at least 2 GPUS. Skip if this is not met"""
func.skip_if_no_gpu = True
@wraps(func)
def wrapper(*args, **kwargs):
if not torch.cuda.is_available():
sys.exit(SKIP_IF_NO_CUDA_EXIT_CODE)
if torch.cuda.device_count() < int(os.environ["WORLD_SIZE"]):
sys.exit(SKIP_IF_NO_GPU_EXIT_CODE)
return func(*args, **kwargs)
return wrapper
def skip_if_small_worldsize(func):
func.skip_if_small_worldsize = True
@wraps(func)
def wrapper(*args, **kwargs):
if (os.environ["BACKEND"] != "mpi") and int(os.environ["WORLD_SIZE"]) <= 2:
sys.exit(SKIP_IF_SMALL_WORLDSIZE_EXIT_CODE)
return func(*args, **kwargs)
return wrapper
def apply_hack_for_nccl():
    # This is a hack for a known NCCL issue: using multiprocessing in
    # conjunction with multiple threads to manage different GPUs may cause
    # ncclCommInitRank to fail.
    # http://docs.nvidia.com/deeplearning/sdk/nccl-release-notes/rel_2.1.4.html#rel_2.1.4
    # The setting slows down collective operations, but without it NCCL may
    # throw an unhandled error.
os.environ["NCCL_MAX_NRINGS"] = "1"
@contextmanager
def _lock():
lockfile = os.path.join(TEMP_DIR, "lockfile")
with open(lockfile, "w") as lf:
try:
fcntl.flock(lf.fileno(), fcntl.LOCK_EX)
yield
finally:
fcntl.flock(lf.fileno(), fcntl.LOCK_UN)
lf.close()
def _build_tensor(size, value=None):
if value is None:
value = size
return torch.FloatTensor(size, size, size).fill_(value)
class Barrier(object):
barrier_id = 0
@classmethod
def init(cls):
cls.barrier_id = 0
barrier_dir = os.path.join(TEMP_DIR, "barrier")
for f_name in os.listdir(barrier_dir):
os.unlink(os.path.join(barrier_dir, f_name))
@classmethod
def sync(cls, timeout=5):
cls.barrier_id += 1
barrier_dir = os.path.join(TEMP_DIR, "barrier")
pid = str(os.getpid())
barrier_file = os.path.join(barrier_dir, pid)
with _lock():
with open(barrier_file, "w") as f:
f.write(str(cls.barrier_id))
start_time = time.time()
while True:
arrived = 0
with _lock():
for f_name in os.listdir(barrier_dir):
with open(os.path.join(barrier_dir, f_name), "r") as f:
data = f.read()
if int(data) >= cls.barrier_id:
arrived += 1
if arrived == dist.get_world_size():
break
if time.time() - start_time > timeout:
raise RuntimeError("barrier timeout")
time.sleep(0.1)
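# How the barrier works (summary): each process increments barrier_id, writes it
# to its own file under TEMP_DIR/barrier, then polls (holding the flock-based
# _lock) until every process's file reports a value >= the current barrier_id,
# or raises RuntimeError once the timeout expires.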
# The test network must live at top level so we can test pickling it.
class _FC2(nn.Module):
def __init__(self):
super(_FC2, self).__init__()
self.fc = nn.Linear(10, 50, bias=True)
self.fc.bias.requires_grad = False
def forward(self, x):
x = self.fc(x)
return x
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = _FC2()
self.fc3 = nn.Linear(50, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
x = self.fc3(x)
return F.softmax(x, dim=1)
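# Aside (illustrative): keeping Net at module level is what allows torch.save in
# the DDP save/load check further below to pickle model_DDP, since pickle records
# classes by their importable module-level name.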
class _DistTestBase(object):
def _barrier(self, *args, **kwargs):
Barrier.sync(*args, **kwargs)
def _init_group_test(self):
group = [1, 2]
group_id = dist.new_group(group)
rank = dist.get_rank()
if rank not in group:
return ([], None, rank)
return (group, group_id, rank)
def _init_global_test(self):
group = [i for i in range(0, dist.get_world_size())]
group_id = dist.group.WORLD
rank = dist.get_rank()
return (group, group_id, rank)
# HELPER FOR MULTIGPU TESTS
def _init_multigpu_helper(self):
"""Multigpu tests are designed to simulate the multi nodes with multi
GPUs on each node. Nccl backend requires equal #GPUs in each process.
On a single node, all visible GPUs are evenly
divided to subsets, each process only uses a subset.
"""
nGPUs = torch.cuda.device_count()
world_size = dist.get_world_size()
visible_devices = range(nGPUs)
if BACKEND == "nccl":
apply_hack_for_nccl()
nGPUs_per_process = nGPUs // world_size
rank_to_GPU = {
i: list(
visible_devices[i * nGPUs_per_process: (i + 1) * nGPUs_per_process]
)
for i in range(world_size)
}
return rank_to_GPU
# GET RANK
def test_get_rank(self):
test_dir = os.path.join(TEMP_DIR, "test_dir")
pid = str(os.getpid())
num_processes = dist.get_world_size()
with open(os.path.join(test_dir, pid), "w") as f:
f.write(str(dist.get_rank()))
self._barrier()
all_ranks = set()
for f_name in os.listdir(test_dir):
with open(os.path.join(test_dir, f_name), "r") as f:
all_ranks.add(int(f.read()))
self.assertEqual(len(all_ranks), num_processes)
self._barrier()
if dist.get_rank() == 0:
for f_name in os.listdir(test_dir):
os.unlink(os.path.join(test_dir, f_name))
self._barrier()
# SEND RECV
@unittest.skipIf(BACKEND == "gloo", "Gloo does not support send/recv")
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support send/recv")
def test_send_recv(self):
rank = dist.get_rank()
tensor = _build_tensor(rank + 1)
for dest in range(0, dist.get_world_size()):
if dest == rank:
continue
dist.send(tensor, dest)
for src in range(0, dist.get_world_size()):
if src == rank:
continue
tensor = _build_tensor(src + 1, value=-1)
expected_tensor = _build_tensor(src + 1)
dist.recv(tensor, src)
self.assertEqual(tensor, expected_tensor)
self._barrier()
# SEND RECV ANY SOURCE
@unittest.skipIf(
BACKEND == "gloo", "Gloo does not support send/recv from any source"
)
@unittest.skipIf(
BACKEND == "nccl", "Nccl does not support send/recv from any source"
)
def test_send_recv_any_source(self):
rank = dist.get_rank()
tensor = _build_tensor(10, rank)
for dest in range(0, dist.get_world_size()):
if dest == rank:
continue
dist.send(tensor, dest)
recv_ranks = set()
for src in range(0, dist.get_world_size()):
if src == rank:
continue
tensor = _build_tensor(10, value=-1)
sender = dist.recv(tensor)
self.assertTrue(tensor.eq(sender).all())
recv_ranks.add(sender)
self.assertEqual(len(recv_ranks), dist.get_world_size() - 1)
self._barrier()
# ISEND
@unittest.skipIf(BACKEND == "gloo", "Gloo does not support isend")
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support isend")
def test_isend(self):
rank = dist.get_rank()
world_size = dist.get_world_size()
if rank == 0:
requests = [
dist.isend(_build_tensor(dest, 10), dest)
for dest in range(1, world_size)
]
for request in requests:
request.wait()
self.assertTrue(request.is_completed())
else:
tensor = _build_tensor(rank, -1)
dist.recv(tensor, 0)
self.assertEqual(tensor, _build_tensor(rank, 10))
self._barrier()
# IRECV
@unittest.skipIf(BACKEND == "gloo", "Gloo does not support irecv")
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support irecv")
def test_irecv(self):
rank = dist.get_rank()
world_size = dist.get_world_size()
if rank == 0:
expected_tensors = [_build_tensor(src, -1) for src in range(1, world_size)]
requests = [
dist.irecv(expected_tensors[src - 1], src)
for src in range(1, world_size)
]
for src in range(1, world_size):
requests[src - 1].wait()
self.assertTrue(requests[src - 1].is_completed())
self.assertEqual(expected_tensors[src - 1], _build_tensor(src, 10))
else:
tensor = _build_tensor(rank, 10)
dist.send(tensor, 0)
self._barrier()
# BROADCAST
def _test_broadcast_helper(
self, group, group_id, rank, cuda=False, rank_to_GPU=None
):
for ttype, value, requires_cuda in [
("torch.FloatTensor", -1e-10, False),
("torch.DoubleTensor", -1e-100, False),
("torch.HalfTensor", -0.1, True),
("torch.CharTensor", -2, False),
("torch.ByteTensor", 129, False),
("torch.IntTensor", -1e5, False),
("torch.LongTensor", -1e15, False),
]:
if requires_cuda and not cuda:
continue
for src in group:
expected_tensor = _build_tensor(src + 1, value).type(ttype)
if cuda:
expected_tensor = expected_tensor.cuda(rank_to_GPU[rank][0])
if rank == src:
dist.broadcast(expected_tensor, src, group_id)
else:
tensor = _build_tensor(src + 1, -1).type(ttype)
if cuda:
tensor = tensor.cuda(rank_to_GPU[rank][0])
dist.broadcast(tensor, src, group_id)
self.assertEqual(tensor.size(), expected_tensor.size())
self.assertEqual(tensor.ne(expected_tensor).max(), 0)
self._barrier()
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_broadcast(self):
group, group_id, rank = self._init_global_test()
self._test_broadcast_helper(group, group_id, rank)
@unittest.skipIf(
BACKEND != "gloo" and BACKEND != "nccl",
"Only Gloo and Nccl backend supports CUDA allReduce",
)
@skip_if_no_cuda_distributed
@skip_if_no_gpu
def test_broadcast_cuda(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = self._init_multigpu_helper()
self._test_broadcast_helper(group, group_id, rank, True, rank_to_GPU)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support newGroup")
@skip_if_small_worldsize
def test_broadcast_group(self):
group, group_id, rank = self._init_group_test()
self._test_broadcast_helper(group, group_id, rank)
# REDUCE
def _test_reduce_helper(
self,
group,
group_id,
rank,
op,
master_value,
worker_value,
expected_value,
cuda=False,
rank_to_GPU=None,
):
for src in group:
if rank == src:
tensor = _build_tensor(src + 1).fill_(master_value)
if cuda:
tensor = tensor.cuda(rank_to_GPU[rank][0])
dist.reduce(tensor, src, op, group_id)
self.assertEqual(tensor, _build_tensor(src + 1, expected_value))
else:
tensor = _build_tensor(src + 1).fill_(worker_value)
if cuda:
tensor = tensor.cuda(rank_to_GPU[rank][0])
dist.reduce(tensor, src, op, group_id)
self._barrier()
@unittest.skipIf(BACKEND == "gloo", "Gloo does not support reduce")
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_reduce_sum(self):
group, group_id, rank = self._init_global_test()
self._test_reduce_helper(
group,
group_id,
rank,
dist.reduce_op.SUM,
2,
10,
2 + (10 * (len(group) - 1)),
)
@unittest.skipIf(BACKEND != "nccl", "Only Nccl supports CUDA reduce")
@skip_if_no_cuda_distributed
@skip_if_no_gpu
def test_reduce_sum_cuda(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = self._init_multigpu_helper()
self._test_reduce_helper(
group,
group_id,
rank,
dist.reduce_op.SUM,
2,
10,
2 + 10 * (len(group) - 1),
True,
rank_to_GPU,
)
@unittest.skipIf(BACKEND == "gloo", "Gloo does not support reduce")
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_reduce_product(self):
group, group_id, rank = self._init_global_test()
self._test_reduce_helper(
group,
group_id,
rank,
dist.reduce_op.PRODUCT,
2,
10,
reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),
)
@unittest.skipIf(BACKEND == "gloo", "Gloo does not support reduce")
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_reduce_min(self):
group, group_id, rank = self._init_global_test()
self._test_reduce_helper(group, group_id, rank, dist.reduce_op.MIN, 1010, 1, 1)
@unittest.skipIf(BACKEND == "gloo", "Gloo does not support reduce")
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_reduce_max(self):
group, group_id, rank = self._init_global_test()
self._test_reduce_helper(group, group_id, rank, dist.reduce_op.MAX, -1, 10, 10)
@unittest.skipIf(BACKEND == "gloo", "Gloo does not support reduce")
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support newGroup")
@skip_if_small_worldsize
def test_reduce_group_sum(self):
group, group_id, rank = self._init_group_test()
self._test_reduce_helper(
group,
group_id,
rank,
dist.reduce_op.SUM,
2,
10,
2 + (10 * (len(group) - 1)),
)
@unittest.skipIf(BACKEND == "gloo", "Gloo does not support reduce")
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support newGroup")
@skip_if_small_worldsize
def test_reduce_group_product(self):
group, group_id, rank = self._init_group_test()
self._test_reduce_helper(
group,
group_id,
rank,
dist.reduce_op.PRODUCT,
2,
10,
reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),
)
@unittest.skipIf(BACKEND == "gloo", "Gloo does not support reduce")
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support newGroup")
@skip_if_small_worldsize
def test_reduce_group_min(self):
group, group_id, rank = self._init_group_test()
self._test_reduce_helper(group, group_id, rank, dist.reduce_op.MIN, 1010, 1, 1)
@unittest.skipIf(BACKEND == "gloo", "Gloo does not support reduce")
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support newGroup")
@skip_if_small_worldsize
def test_reduce_group_max(self):
group, group_id, rank = self._init_group_test()
self._test_reduce_helper(group, group_id, rank, dist.reduce_op.MAX, -1, 10, 10)
# ALL REDUCE
def _test_all_reduce_helper(
self,
group,
group_id,
rank,
op,
master_value,
worker_value,
expected_value,
cuda=False,
rank_to_GPU=None,
):
for src in group:
if rank == src:
tensor = _build_tensor(src + 1).fill_(master_value)
if cuda:
tensor = tensor.cuda(rank_to_GPU[rank][0])
dist.all_reduce(tensor, op, group_id)
self.assertEqual(tensor, _build_tensor(src + 1, expected_value))
else:
tensor = _build_tensor(src + 1).fill_(worker_value)
if cuda:
tensor = tensor.cuda(rank_to_GPU[rank][0])
dist.all_reduce(tensor, op, group_id)
self.assertEqual(tensor, _build_tensor(src + 1, expected_value))
self._barrier()
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_reduce_sum(self):
group, group_id, rank = self._init_global_test()
self._test_all_reduce_helper(
group,
group_id,
rank,
dist.reduce_op.SUM,
2,
10,
2 + (10 * (len(group) - 1)),
)
@unittest.skipIf(
BACKEND != "gloo" and BACKEND != "nccl",
"Only Gloo & Nccl backend support CUDA allReduce",
)
@skip_if_no_cuda_distributed
@skip_if_no_gpu
def test_all_reduce_sum_cuda(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = self._init_multigpu_helper()
self._test_all_reduce_helper(
group,
group_id,
rank,
dist.reduce_op.SUM,
2,
10,
2 + (10 * (len(group) - 1)),
True,
rank_to_GPU,
)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_reduce_product(self):
group, group_id, rank = self._init_global_test()
self._test_all_reduce_helper(
group,
group_id,
rank,
dist.reduce_op.PRODUCT,
2,
10,
reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),
)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_reduce_min(self):
group, group_id, rank = self._init_global_test()
self._test_all_reduce_helper(
group, group_id, rank, dist.reduce_op.MIN, 1010, 1, 1
)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_reduce_max(self):
group, group_id, rank = self._init_global_test()
self._test_all_reduce_helper(
group, group_id, rank, dist.reduce_op.MAX, -1, 10, 10
)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support newGroup")
@skip_if_small_worldsize
def test_all_reduce_group_sum(self):
group, group_id, rank = self._init_group_test()
self._test_all_reduce_helper(
group,
group_id,
rank,
dist.reduce_op.SUM,
2,
10,
2 + (10 * (len(group) - 1)),
)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support newGroup")
@skip_if_small_worldsize
def test_all_reduce_group_product(self):
group, group_id, rank = self._init_group_test()
self._test_all_reduce_helper(
group,
group_id,
rank,
dist.reduce_op.PRODUCT,
2,
10,
reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),
)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support newGroup")
@skip_if_small_worldsize
def test_all_reduce_group_min(self):
group, group_id, rank = self._init_group_test()
self._test_all_reduce_helper(
group, group_id, rank, dist.reduce_op.MIN, 1010, 1, 1
)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support newGroup")
@skip_if_small_worldsize
def test_all_reduce_group_max(self):
group, group_id, rank = self._init_group_test()
self._test_all_reduce_helper(
group, group_id, rank, dist.reduce_op.MAX, -1, 10, 10
)
# SCATTER
def _test_scatter_helper(self, group, group_id, rank):
for dest in group:
tensor = _build_tensor(dest + 1, -1)
expected_tensor = _build_tensor(dest + 1, rank)
tensors = (
[_build_tensor(dest + 1, i) for i in group] if rank == dest else []
)
dist.scatter(tensor, src=dest, scatter_list=tensors, group=group_id)
self.assertEqual(tensor, expected_tensor)
self._barrier()
@unittest.skipIf(BACKEND == "gloo", "Gloo does not support scatter")
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support scatter")
def test_scatter(self):
group, group_id, rank = self._init_global_test()
self._test_scatter_helper(group, group_id, rank)
@unittest.skipIf(BACKEND == "gloo", "Gloo does not support scatter")
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support scatter")
@skip_if_small_worldsize
def test_scatter_group(self):
group, group_id, rank = self._init_group_test()
self._test_scatter_helper(group, group_id, rank)
# GATHER
def _test_gather_helper(self, group, group_id, rank):
for dest in group:
tensor = _build_tensor(dest + 1, rank)
tensors = (
[_build_tensor(dest + 1, -1) for i in group] if rank == dest else []
)
dist.gather(tensor, dst=dest, gather_list=tensors, group=group_id)
if rank == dest:
expected_tensors = [_build_tensor(dest + 1, i) for i in group]
for t1, t2 in zip(tensors, expected_tensors):
self.assertEqual(t1, t2)
self._barrier()
@unittest.skipIf(BACKEND == "gloo", "Gloo does not support gather")
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_gather(self):
group, group_id, rank = self._init_global_test()
self._test_gather_helper(group, group_id, rank)
@unittest.skipIf(BACKEND == "gloo", "Gloo does not support gather")
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support newGroup")
@skip_if_small_worldsize
def test_gather_group(self):
group, group_id, rank = self._init_group_test()
self._test_gather_helper(group, group_id, rank)
# ALL GATHER
def _test_all_gather_helper(
self, group, group_id, rank, cuda=False, rank_to_GPU=None
):
for dest in group:
tensor = _build_tensor(dest + 1, rank)
tensors = [_build_tensor(dest + 1, -1) for i in group]
if cuda:
tensor = tensor.cuda(rank_to_GPU[rank][0])
tensors = [t.cuda(rank_to_GPU[rank][0]) for t in tensors]
dist.all_gather(tensors, tensor, group_id)
expected_tensors = [_build_tensor(dest + 1, i) for i in group]
for t1, t2 in zip(tensors, expected_tensors):
self.assertEqual(t1, t2)
self._barrier()
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_gather(self):
group, group_id, rank = self._init_global_test()
self._test_all_gather_helper(group, group_id, rank)
@unittest.skipIf(BACKEND != "nccl", "Only Nccl supports CUDA all gather")
@skip_if_no_cuda_distributed
@skip_if_no_gpu
def test_all_gather_cuda(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = self._init_multigpu_helper()
self._test_all_gather_helper(group, group_id, rank, True, rank_to_GPU)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support newGroup")
@skip_if_small_worldsize
def test_all_gather_group(self):
group, group_id, rank = self._init_group_test()
self._test_all_gather_helper(group, group_id, rank)
# BARRIER
def _test_barrier_helper(self, group, group_id, rank):
WAIT_TIME = 0.3 # seconds
for dest in group:
expected_time = torch.DoubleTensor(1).fill_(0.0)
if dest == rank:
expected_time.fill_(time.time() + WAIT_TIME)
dist.broadcast(expected_time, dest, group_id)
time.sleep(WAIT_TIME + 0.1) # sleep a little bit longer
dist.barrier(group_id)
else:
dist.broadcast(expected_time, dest, group_id)
dist.barrier(group_id)
self.assertGreaterEqual(time.time(), expected_time[0])
self._barrier()
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_barrier(self):
group, group_id, rank = self._init_global_test()
self._test_barrier_helper(group, group_id, rank)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support newGroup")
@skip_if_small_worldsize
def test_barrier_group(self):
group, group_id, rank = self._init_group_test()
self._test_barrier_helper(group, group_id, rank)
def _test_broadcast_multigpu_helper(self, group, group_id, rank, rank_to_GPU):
for src in group:
expected_tensor = _build_tensor(src + 1)
tensors = [
_build_tensor(src + 1, -1).cuda(device=i) for i in rank_to_GPU[rank]
]
if rank == src:
tensors[0] = expected_tensor.cuda(device=rank_to_GPU[rank][0])
dist.broadcast_multigpu(tensors, src, group_id)
for tensor in tensors:
self.assertEqual(tensor, expected_tensor)
self._barrier()
@unittest.skipIf(BACKEND != "nccl", "Only Nccl backend supports broadcast multigpu")
@skip_if_no_gpu
def test_broadcast_multigpu(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = self._init_multigpu_helper()
self._test_broadcast_multigpu_helper(group, group_id, rank, rank_to_GPU)
def _test_all_reduce_multigpu_helper(
self,
group,
group_id,
rank,
rank_to_GPU,
op,
master_value,
worker_value,
expected_value,
):
for src in group:
if rank == src:
tensors = [
_build_tensor(src + 1, master_value).cuda(device=i)
for i in rank_to_GPU[rank]
]
else:
tensors = [
_build_tensor(src + 1, worker_value).cuda(device=i)
for i in rank_to_GPU[rank]
]
dist.all_reduce_multigpu(tensors, op, group_id)
expected_tensor = _build_tensor(src + 1, expected_value)
for tensor in tensors:
self.assertEqual(tensor, expected_tensor)
self._barrier()
@unittest.skipIf(BACKEND != "nccl", "Only Nccl backend supports allreduce multigpu")
@skip_if_no_gpu
def test_all_reduce_multigpu(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = self._init_multigpu_helper()
self._test_all_reduce_multigpu_helper(
group,
group_id,
rank,
rank_to_GPU,
dist.reduce_op.SUM,
2,
10,
(2 + 10 * (len(group) - 1)) * len(rank_to_GPU[0]),
)
def _test_reduce_multigpu_helper(
self,
group,
group_id,
rank,
rank_to_GPU,
op,
master_value,
worker_value,
expected_value,
):
for src in group:
if rank == src:
tensors = [
_build_tensor(src + 1, master_value).cuda(device=i)
for i in rank_to_GPU[rank]
]
dist.reduce_multigpu(tensors, src, op, group_id)
expected_tensor = _build_tensor(src + 1, expected_value)
self.assertEqual(tensors[0], expected_tensor)
else:
tensors = [
_build_tensor(src + 1, worker_value).cuda(device=i)
for i in rank_to_GPU[rank]
]
dist.reduce_multigpu(tensors, src, op, group_id)
self._barrier()
@unittest.skipIf(BACKEND != "nccl", "Only Nccl backend supports reduce multigpu")
@skip_if_no_gpu
def test_reduce_multigpu(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = self._init_multigpu_helper()
self._test_reduce_multigpu_helper(
group,
group_id,
rank,
rank_to_GPU,
dist.reduce_op.SUM,
2,
10,
(2 + 10 * (len(group) - 1)) * len(rank_to_GPU[0]),
)
def _test_all_gather_multigpu_helper(self, group, group_id, rank, rank_to_GPU):
for dest in group:
tensors = [
_build_tensor(dest + 1).cuda(device=i) for i in rank_to_GPU[rank]
]
            # construct the expected output along with
            # placeholders to receive the all_gather results
output_tensors = []
expected_output = []
output_per_gpu = (
[_build_tensor(dest + 1, -1)] * len(rank_to_GPU[0]) * len(group)
)
expected_per_gpu = (
[_build_tensor(dest + 1)] * len(rank_to_GPU[0]) * len(group)
)
for gpu in rank_to_GPU[rank]:
output_tensors.append([t.cuda(device=gpu) for t in output_per_gpu])
expected_output.append([t.cuda(device=gpu) for t in expected_per_gpu])
dist.all_gather_multigpu(output_tensors, tensors, group_id)
self.assertEqual(output_tensors, expected_output)
self._barrier()
@unittest.skipIf(BACKEND != "nccl", "Only Nccl backend supports allgather multigpu")
@skip_if_no_gpu
def test_all_gather_multigpu(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = self._init_multigpu_helper()
self._test_all_gather_multigpu_helper(group, group_id, rank, rank_to_GPU)
def _create_Net(self):
return Net()
def _model_step(self, model):
for param in model.parameters():
if param.grad is not None:
param.data += param.grad
param.grad = None
def _prepare_dummy_data(self, local_bs):
# global_bs for DDP should be divisible by WORLD_SIZE
global_bs = int(WORLD_SIZE) * local_bs
input_cpu = torch.randn(global_bs, 2)
target = torch.randn(global_bs, 4)
loss = nn.MSELoss()
return global_bs, input_cpu, target, loss
# END TO END TEST FOR DISTRIBUTEDDATAPARALLEL
def _test_DDP_helper(self, model, input_var, target, loss):
model.train()
output = model(input_var)
l = loss(output, target)
l.backward()
def _assert_equal_param(self, param_gpu, param_DDP):
self.assertEqual(len(param_gpu), len(param_DDP))
for p_gpu, p_DDP in zip(param_gpu, param_DDP):
self.assertEqual(p_gpu, p_DDP)
def _test_DDP_2iter(
self, model_base, model_DDP, input, target, loss, local_bs, rank, batch_size
):
for _ in range(2):
# single cpu/gpu training
self._test_DDP_helper(model_base, input, target, loss)
# DDP training, DDP scatters subsets of input_cpu to nodes/GPUs
self._test_DDP_helper(
model_DDP,
input[rank * local_bs: (rank + 1) * local_bs],
target[rank * local_bs: (rank + 1) * local_bs],
loss,
)
# Update weights and run a second iteration to shake out errors
self._model_step(model_base)
self._model_step(model_DDP)
self._assert_equal_param(
list(model_base.parameters()), list(model_DDP.module.parameters())
)
# Shuffle the input so that DDP input is different
input = input[torch.randperm(batch_size)]
# Test that saving and loading work
with tempfile.TemporaryFile() as tmp_file:
torch.save(model_DDP, tmp_file)
tmp_file.seek(0)
saved_model_DDP = torch.load(tmp_file)
@unittest.skipIf(
BACKEND != "nccl" and BACKEND != "gloo",
"Only Nccl & Gloo backend support DistributedDataParallel",
)
@skip_if_no_cuda_distributed
@skip_if_no_gpu
def test_DistributedDataParallel(self):
# Run a simple end to end DDP model, use result of single node model
# as baseline
group, group_id, rank = self._init_global_test()
rank_to_GPU = self._init_multigpu_helper()
# cpu training setup
model = self._create_Net()
# single gpu training setup
model_gpu = copy.deepcopy(model)
gpu_subset = list(rank_to_GPU[rank])
model_gpu.cuda(gpu_subset[0])
# DDP training setup
model_DDP = copy.deepcopy(model)
model_DDP.cuda(gpu_subset[0])
model_DDP = nn.parallel.deprecated.DistributedDataParallel(
model_DDP, device_ids=gpu_subset
)
# dummy data initialization
local_bs = len(gpu_subset)
global_bs, input_cpu, target, loss = self._prepare_dummy_data(local_bs)
# check two model parameters over 2 iterations
self._test_DDP_2iter(
model_gpu,
model_DDP,
input_cpu.cuda(gpu_subset[0]),
target.cuda(gpu_subset[0]),
loss,
local_bs,
rank,
global_bs,
)
self._barrier()
@unittest.skipIf(
BACKEND == "nccl", "nccl does not support DistributedDataParallelCPU"
)
def test_DistributedDataParallelCPU(self):
# Run a simple end to end DDP-CPU model, use result of single node
# model as baseline
group, group_id, rank = self._init_global_test()
# cpu training setup
model_base = self._create_Net()
# DDP-CPU training setup
model_DDP = copy.deepcopy(model_base)
model_DDP = nn.parallel.deprecated.DistributedDataParallelCPU(model_DDP)
# dummy data initialization
local_bs = 2
global_bs, input_cpu, target, loss = self._prepare_dummy_data(local_bs)
# check two model parameters over 2 iterations
self._test_DDP_2iter(
model_base, model_DDP, input_cpu, target, loss, local_bs, rank, global_bs
)
self._barrier()
if BACKEND == "tcp" or BACKEND == "gloo" or BACKEND == "nccl":
WORLD_SIZE = os.environ["WORLD_SIZE"]
class TestDistBackend(TestCase, _DistTestBase):
MANAGER_PROCESS_RANK = -1
@staticmethod
def manager_join(fn):
@wraps(fn)
def wrapper(self):
if self.rank == self.MANAGER_PROCESS_RANK:
self._join_and_reduce(fn)
else:
fn(self)
return wrapper
@classmethod
def setUpClass(cls):
os.environ["MASTER_ADDR"] = MASTER_ADDR
os.environ["MASTER_PORT"] = MASTER_PORT
os.environ["WORLD_SIZE"] = WORLD_SIZE
for attr in dir(cls):
if attr.startswith("test"):
fn = getattr(cls, attr)
setattr(cls, attr, cls.manager_join(fn))
def setUp(self):
super(TestDistBackend, self).setUp()
self.processes = []
self.rank = self.MANAGER_PROCESS_RANK
Barrier.init()
for rank in range(int(WORLD_SIZE)):
self.processes.append(self._spawn_process(rank))
def tearDown(self):
super(TestDistBackend, self).tearDown()
for p in self.processes:
p.terminate()
def _spawn_process(self, rank):
os.environ["RANK"] = str(rank)
name = "process " + str(rank)
process = multiprocessing.Process(target=self._run, name=name, args=(rank,))
process.start()
return process
def _run(self, rank):
self.rank = rank
try:
dist.init_process_group(
init_method=INIT_METHOD, backend=BACKEND, world_size=int(WORLD_SIZE)
)
except RuntimeError as e:
if "recompile" in e.args[0]:
sys.exit(SKIP_IF_BACKEND_UNAVAILABLE)
# sys.exit(0)
raise
# self.id() == e.g. '__main__.TestDistributed.test_get_rank'
            # We're retrieving the corresponding test and executing it.
getattr(self, self.id().split(".")[2])()
sys.exit(0)
def _join_and_reduce(self, fn):
skip_ok = (
getattr(fn, "skip_if_no_cuda_distributed", False) or
getattr(fn, "skip_if_no_gpu", False) or
getattr(fn, "skip_if_small_worldsize", False)
)
join_timeout = get_timeout(self.id())
for rank, process in enumerate(self.processes):
process.join(join_timeout)
self.assertFalse(
process.is_alive(),
"Timeout waiting for rank %d to terminate" % rank)
first_process = self.processes[0]
for p in self.processes:
self.assertEqual(p.exitcode, first_process.exitcode)
if first_process.exitcode == SKIP_IF_BACKEND_UNAVAILABLE:
raise unittest.SkipTest("Compiled without the " + BACKEND + " backend")
if skip_ok:
# do this first so we don't give an error message about
# mismatched exit codes if the first isn't valid
assert (
first_process.exitcode == 0 or
first_process.exitcode == SKIP_IF_NO_CUDA_EXIT_CODE or
first_process.exitcode == SKIP_IF_NO_GPU_EXIT_CODE or
first_process.exitcode == SKIP_IF_SMALL_WORLDSIZE_EXIT_CODE
)
if first_process.exitcode == SKIP_IF_NO_CUDA_EXIT_CODE:
raise unittest.SkipTest("cuda is not available")
if first_process.exitcode == SKIP_IF_NO_GPU_EXIT_CODE:
raise unittest.SkipTest(
"One unique gpu per process is not available"
)
if first_process.exitcode == SKIP_IF_SMALL_WORLDSIZE_EXIT_CODE:
raise unittest.SkipTest("worldsize is too small to run group tests")
self.assertEqual(first_process.exitcode, 0)
elif BACKEND == "mpi":
WORLD_SIZE = os.environ["WORLD_SIZE"]
dist.init_process_group(init_method=INIT_METHOD, backend="mpi")
class TestMPI(TestCase, _DistTestBase):
pass
if __name__ == "__main__":
assert (
not torch.cuda._initialized
), "test_distributed must not have initialized CUDA context on main process"
run_tests()
|
device.py
|
# Copyright 2018 Jetperch LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from joulescope.usb import core as usb_core
from joulescope.usb.api import DeviceEvent
from joulescope.usb.impl_tools import RunUntilDone
from joulescope.usb.core import SetupPacket, ControlTransferResponse
from joulescope.usb.scan_info import INFO
from typing import List
import time
import threading
from contextlib import contextmanager
import platform
import numpy as np
import os
import sys
import struct
import ctypes
import ctypes.util
from ctypes import Structure, c_uint8, c_uint16, c_uint32, c_uint, \
c_int, c_char, c_ssize_t, c_void_p, POINTER, pointer, byref
import logging
log = logging.getLogger(__name__)
STRING_LENGTH_MAX = 255
TRANSFER_TIMEOUT_MS = 1000 # default in milliseconds
find_lib = ctypes.util.find_library('usb-1.0')
if find_lib is None:
if platform.system() == 'Darwin' and getattr(sys, 'frozen', False):
machine = platform.machine()
os_version = platform.release().split('.')[0]
find_lib = os.path.join(sys._MEIPASS, f'{machine}_{os_version}_libusb-1.0.0.dylib')
log.info('Darwin lib: %s', find_lib)
else:
raise RuntimeError('Could not import libusb')
_lib = ctypes.cdll.LoadLibrary(find_lib)
class DescriptorType:
DEVICE = 0x01
CONFIG = 0x02
STRING = 0x03
INTERFACE = 0x04
ENDPOINT = 0x05
BOS = 0x0f
DEVICE_CAPABILITY = 0x10
HID = 0x21
REPORT = 0x22
PHYSICAL = 0x23
HUB = 0x29
SUPERSPEED_HUB = 0x2a
SS_ENDPOINT_COMPANION = 0x30
class TransferType:
CONTROL = 0
ISOCHRONOUS = 1
BULK = 2
INTERRUPT = 3
BULK_STREAM = 4
class TransferStatus:
COMPLETED = 0
ERROR = 1
TIMED_OUT = 2
CANCELLED = 3
STALL = 4
NO_DEVICE = 5
OVERFLOW = 6
class TransferFlags:
SHORT_NOT_OK = 1 << 0
FREE_BUFFER = 1 << 1
FREE_TRANSFER = 1 << 2
ADD_ZERO_PACKET = 1 << 3
class ReturnCodes:
SUCCESS = 0
ERROR_IO = -1
ERROR_INVALID_PARAM = -2
ERROR_ACCESS = -3
ERROR_NO_DEVICE = -4
ERROR_NOT_FOUND = -5
ERROR_BUSY = -6
ERROR_TIMEOUT = -7
ERROR_OVERFLOW = -8
ERROR_PIPE = -9
ERROR_INTERRUPTED = -10
ERROR_NO_MEM = -11
ERROR_NOT_SUPPORTED = -12
ERROR_OTHER = -99
class _libusb_device_descriptor(Structure):
_fields_ = [
('bLength', c_uint8),
('bDescriptorType', c_uint8),
('bcdUSB', c_uint16),
('bDeviceClass', c_uint8),
('bDeviceSubClass', c_uint8),
('bDeviceProtocol', c_uint8),
('bMaxPacketSize0', c_uint8),
('idVendor', c_uint16),
('idProduct', c_uint16),
('bcdDevice', c_uint16),
('iManufacturer', c_uint8),
('iProduct', c_uint8),
('iSerialNumber', c_uint8),
('bNumConfigurations', c_uint8)]
# typedef void (LIBUSB_CALL *libusb_transfer_cb_fn)(struct libusb_transfer *transfer);
libusb_transfer_cb_fn = ctypes.CFUNCTYPE(None, c_void_p)
class _libusb_transfer(Structure):
_fields_ = [
('dev_handle', c_void_p),
('flags', c_uint8),
('endpoint_id', c_uint8),
('endpoint_type', c_uint8),
('timeout_ms', c_uint),
('status', c_int), # enum libusb_transfer_status
('length', c_int),
('actual_length', c_int),
('callback', libusb_transfer_cb_fn),
('user_data', c_void_p),
('buffer', POINTER(c_uint8)),
('num_iso_packets', c_int),
# struct libusb_iso_packet_descriptor iso_packet_desc[ZERO_SIZED_ARRAY];
]
# typedef struct libusb_context libusb_context; - c_void_p
# typedef struct libusb_device libusb_device; - c_void_p
# typedef struct libusb_device_handle libusb_device_handle; c_void_p
# int LIBUSB_CALL libusb_init(libusb_context **ctx);
_lib.libusb_init.restype = c_int
_lib.libusb_init.argtypes = [POINTER(c_void_p)]
# void LIBUSB_CALL libusb_exit(libusb_context *ctx);
_lib.libusb_exit.restype = None
_lib.libusb_exit.argtypes = [c_void_p]
# ssize_t LIBUSB_CALL libusb_get_device_list(libusb_context *ctx,
# libusb_device ***list);
_lib.libusb_get_device_list.restype = c_ssize_t
_lib.libusb_get_device_list.argtypes = [c_void_p, POINTER(POINTER(c_void_p))]
# void LIBUSB_CALL libusb_free_device_list(libusb_device **list,
# int unref_devices);
_lib.libusb_free_device_list.restype = None
_lib.libusb_free_device_list.argtypes = [POINTER(c_void_p), c_int]
# int LIBUSB_CALL libusb_open(libusb_device *dev, libusb_device_handle **dev_handle);
_lib.libusb_open.restype = c_int
_lib.libusb_open.argtypes = [c_void_p, POINTER(c_void_p)]
# void LIBUSB_CALL libusb_close(libusb_device_handle *dev_handle);
_lib.libusb_close.restype = None
_lib.libusb_close.argtypes = [c_void_p]
# int LIBUSB_CALL libusb_set_configuration(libusb_device_handle *dev_handle,
# int configuration);
_lib.libusb_set_configuration.restype = c_int
_lib.libusb_set_configuration.argtypes = [c_void_p, c_int]
# int LIBUSB_CALL libusb_claim_interface(libusb_device_handle *dev_handle,
# int interface_number);
_lib.libusb_claim_interface.restype = c_int
_lib.libusb_claim_interface.argtypes = [c_void_p, c_int]
# int LIBUSB_CALL libusb_release_interface(libusb_device_handle *dev_handle,
# int interface_number);
_lib.libusb_release_interface.restype = c_int
_lib.libusb_release_interface.argtypes = [c_void_p, c_int]
# int LIBUSB_CALL libusb_set_interface_alt_setting(libusb_device_handle *dev_handle,
# int interface_number, int alternate_setting);
_lib.libusb_set_interface_alt_setting.restype = c_int
_lib.libusb_set_interface_alt_setting.argtypes = [c_void_p, c_int, c_int]
# int LIBUSB_CALL libusb_get_device_descriptor(libusb_device *dev,
# struct libusb_device_descriptor *desc);
_lib.libusb_get_device_descriptor.restype = c_int
_lib.libusb_get_device_descriptor.argtypes = [c_void_p, POINTER(_libusb_device_descriptor)]
# int LIBUSB_CALL libusb_control_transfer(libusb_device_handle *dev_handle,
# uint8_t request_type, uint8_t bRequest, uint16_t wValue, uint16_t wIndex,
# unsigned char *data, uint16_t wLength, unsigned int timeout);
_lib.libusb_control_transfer.restype = c_int
_lib.libusb_control_transfer.argtypes = [c_void_p, c_uint8, c_uint8, c_uint16, c_uint16,
POINTER(c_uint8), c_uint16, c_int]
# struct libusb_transfer * LIBUSB_CALL libusb_alloc_transfer(int iso_packets);
_lib.libusb_alloc_transfer.restype = POINTER(_libusb_transfer)
_lib.libusb_alloc_transfer.argtypes = [c_int]
# int LIBUSB_CALL libusb_submit_transfer(struct libusb_transfer *transfer);
_lib.libusb_submit_transfer.restype = c_int
_lib.libusb_submit_transfer.argtypes = [POINTER(_libusb_transfer)]
# int LIBUSB_CALL libusb_cancel_transfer(struct libusb_transfer *transfer);
_lib.libusb_cancel_transfer.restype = c_int
_lib.libusb_cancel_transfer.argtypes = [POINTER(_libusb_transfer)]
# void LIBUSB_CALL libusb_free_transfer(struct libusb_transfer *transfer);
_lib.libusb_free_transfer.restype = None
_lib.libusb_free_transfer.argtypes = [POINTER(_libusb_transfer)]
class TimeVal(Structure):
_fields_ = [
("tv_sec", ctypes.c_long),
("tv_usec", ctypes.c_long)
]
# int LIBUSB_CALL libusb_handle_events_timeout(libusb_context *ctx,
# struct timeval *tv);
_lib.libusb_handle_events_timeout.restype = c_int
_lib.libusb_handle_events_timeout.argtypes = [c_void_p, POINTER(TimeVal)]
# int LIBUSB_CALL libusb_handle_events(libusb_context *ctx)
_lib.libusb_handle_events.restype = c_int
_lib.libusb_handle_events.argtypes = [c_void_p]
class HotplugFlag:
NONE = 0
ENUMERATE = 1 << 0
class HotplugEvent:
DEVICE_ARRIVED = 0x01
DEVICE_LEFT = 0x02
HOTPLUG_MATCH_ANY = -1
# typedef int (LIBUSB_CALL *libusb_hotplug_callback_fn)(libusb_context *ctx,
# libusb_device *device,
# libusb_hotplug_event event,
# void *user_data);
_libusb_hotplug_callback_fn = ctypes.CFUNCTYPE(c_int, c_void_p, c_void_p, c_int, c_void_p)
# int LIBUSB_CALL libusb_hotplug_register_callback(libusb_context *ctx,
# libusb_hotplug_event events,
# libusb_hotplug_flag flags,
# int vendor_id, int product_id,
# int dev_class,
# libusb_hotplug_callback_fn cb_fn,
# void *user_data,
# libusb_hotplug_callback_handle *callback_handle);
_lib.libusb_hotplug_register_callback.restype = c_int
_lib.libusb_hotplug_register_callback.argtypes = [c_void_p, c_int, c_int, c_int, c_int, c_int,
_libusb_hotplug_callback_fn, c_void_p, POINTER(c_int)]
# void LIBUSB_CALL libusb_hotplug_deregister_callback(libusb_context *ctx,
# libusb_hotplug_callback_handle callback_handle);
_lib.libusb_hotplug_deregister_callback.restype = c_int
_lib.libusb_hotplug_deregister_callback.argtypes = [c_void_p, c_int]
class Capability:
HAS_CAPABILITY = 0x0000
HAS_HOTPLUG = 0x0001
HAS_HID_ACCESS = 0x0100
SUPPORTS_DETACH_KERNEL_DRIVER = 0x0101
# int LIBUSB_CALL libusb_has_capability(uint32_t capability);
_lib.libusb_has_capability.restype = c_int
_lib.libusb_has_capability.argtypes = [c_uint32]
def _libusb_context_create():
ctx = c_void_p()
rc = _lib.libusb_init(pointer(ctx))
if rc:
raise RuntimeError('Could not open libusb')
return ctx
def _libusb_context_destroy(ctx):
_lib.libusb_exit(ctx)
@contextmanager
def _libusb_context():
ctx = _libusb_context_create()
try:
yield ctx
finally:
_libusb_context_destroy(ctx)
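# Illustrative usage (assumption about call sites): wrapping work in
# `with _libusb_context() as ctx:` guarantees libusb_exit(ctx) runs even if the
# body raises.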
def _path_split(path):
vid, pid, serial_number = path.split('/')
return int(vid, 16), int(pid, 16), serial_number
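# Worked example (illustrative values): _path_split('045e/0040/SN1234') returns
# (0x045e, 0x0040, 'SN1234'); the vid and pid fields are parsed as hexadecimal.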
def _get_string_descriptor(device, index):
request_type = usb_core.RequestType(direction='in', type_='standard', recipient='device').u8
byte_buffer = bytearray(STRING_LENGTH_MAX)
buffer_type = c_uint8 * STRING_LENGTH_MAX
buffer = buffer_type.from_buffer(byte_buffer)
# determine default language
rv = _lib.libusb_control_transfer(device, request_type, usb_core.Request.GET_DESCRIPTOR,
(DescriptorType.STRING << 8), 0,
buffer, STRING_LENGTH_MAX,
1000)
if rv < 0:
raise RuntimeError('control_transfer could not get language: %d' % (rv, ))
langid = int(byte_buffer[2]) | (int(byte_buffer[3]) << 8)
rv = _lib.libusb_control_transfer(device, request_type, usb_core.Request.GET_DESCRIPTOR,
(DescriptorType.STRING << 8) | (index & 0xff), langid,
buffer, STRING_LENGTH_MAX,
1000)
if rv < 0:
raise RuntimeError('control transfer could not get string descriptor: %d' % (rv, ))
buffer_len = min(rv, byte_buffer[0])
# byte 0 is length, byte 1 is string identifier
return byte_buffer[2:buffer_len].decode('UTF-16-LE')
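# Illustrative usage (assumption: `dev` is an open libusb device handle and
# `desc` a populated _libusb_device_descriptor):
#     product = _get_string_descriptor(dev, desc.iProduct)
# This fetches the product string using the default LANGID determined above.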
_transfer_callback_discard_fn = libusb_transfer_cb_fn(lambda x: None)
"""Default null callback that is always safe."""
class Transfer:
def __init__(self, size):
try:
self.size = len(size) # also serves as list-like duck-typing test
self.buffer = np.frombuffer(size, dtype=np.uint8)
log.debug('Transfer: copy buffer %d', self.size)
except TypeError:
self.size = size
self.buffer = np.full(self.size, 0, dtype=np.uint8)
log.debug('Transfer: create buffer %d', self.size)
self.transfer = _lib.libusb_alloc_transfer(0) # type: _libusb_transfer
self.addr = ctypes.addressof(self.transfer.contents)
transfer = self.transfer[0]
self.buffer_ptr = self.buffer.ctypes.data_as(POINTER(c_uint8))
transfer.buffer = self.buffer_ptr
transfer.flags = 0
transfer.length = self.size
transfer.actual_length = 0
transfer.user_data = None
transfer.num_iso_packets = 0
transfer.status = TransferStatus.COMPLETED
transfer.timeout_ms = TRANSFER_TIMEOUT_MS # milliseconds
transfer.callback = _transfer_callback_discard_fn
def __del__(self):
_lib.libusb_free_transfer(self.transfer)
class ControlTransferAsync:
def __init__(self, handle):
"""Manage asynchronous control transfers.
:param handle: The device handle.
"""
self._handle = handle
self._transfer_callback_fn = libusb_transfer_cb_fn(self._transfer_callback)
self._commands = [] # Pending control transfer commands as list of [cbk_fn, setup_packet, buffer]
self._transfer_pending = None # type: Transfer
self._time_start = None
self.stop_code = None
def __str__(self):
return 'ControlTransferAsync()'
def __len__(self):
return len(self._commands)
@property
def is_busy(self):
return 0 != len(self._commands)
def _transfer_callback(self, transfer_void_ptr):
if self._transfer_pending is None:
log.warning('Transfer callback when none pending')
return
if self._transfer_pending.addr != transfer_void_ptr:
log.warning('Transfer mismatch')
return
transfer, self._transfer_pending = self._transfer_pending, None
if self._commands:
self._finish(self._commands.pop(0), transfer)
else:
log.warning('Transfer callback when no commands')
self._issue()
def _abort_all(self):
commands, self._commands = self._commands, []
status = self.stop_code if self.stop_code is not None else TransferStatus.CANCELLED
for cbk_fn, setup_packet, _ in commands:
try:
response = usb_core.ControlTransferResponse(setup_packet, status, None)
cbk_fn(response)
except Exception:
log.exception('in callback while aborting')
def close(self):
if self.stop_code is None:
self.stop_code = 0
handle, self._handle = self._handle, None
if handle and self._transfer_pending:
log.info('ControlTransferAsync.close cancel pending transfer, %d', len(self._commands))
# callback function will be invoked later
_lib.libusb_cancel_transfer(self._transfer_pending.transfer)
else:
log.info('ControlTransferAsync.close %d', len(self._commands))
self._abort_all()
def pend(self, cbk_fn, setup_packet: usb_core.SetupPacket, buffer=None):
"""Pend an asynchronous Control Transfer.
:param cbk_fn: The function to call when the control transfer completes.
A :class:`usb_core.ControlTransferResponse` is the sole argument.
:param setup_packet:
:param buffer: The buffer (if length > 0) for write transactions.
:return: True if pending, False on error.
"""
command = [cbk_fn, setup_packet, buffer]
was_empty = not bool(self._commands)
self._commands.append(command)
if was_empty:
return self._issue()
return True
def _issue(self):
if not self._commands:
return True
if not self._handle:
log.info('_issue but handle not valid')
self._abort_all()
return False
if self.stop_code is not None:
log.info('_issue but stop_code=%s', self.stop_code)
self._abort_all()
return False
log.debug('preparing')
_, setup_packet, buffer = self._commands[0]
hdr = struct.pack('<BBHHH', setup_packet.request_type, setup_packet.request,
setup_packet.value, setup_packet.index, setup_packet.length)
if buffer is not None:
transfer = Transfer(hdr + buffer)
else:
transfer = Transfer(len(hdr) + setup_packet.length)
transfer.buffer[:len(hdr)] = np.frombuffer(hdr, dtype=np.uint8)
t = transfer.transfer[0]
t.dev_handle = self._handle
t.endpoint_id = 0
t.endpoint_type = TransferType.CONTROL
t.callback = self._transfer_callback_fn
self._transfer_pending = transfer
self._time_start = time.time()
rv = _lib.libusb_submit_transfer(transfer.transfer)
if 0 == rv:
log.debug('libusb_submit_transfer [control]')
else:
log.warning('libusb_submit_transfer [control] => %d', rv)
if t.status == 0:
if rv == ReturnCodes.ERROR_NO_DEVICE:
log.info('control transfer but no device')
t.status = TransferStatus.NO_DEVICE
else:
t.status = TransferStatus.ERROR
if self.stop_code is None:
self.stop_code = DeviceEvent.COMMUNICATION_ERROR
self._transfer_callback(transfer.addr)
return False
return True
def _finish(self, command, transfer):
buffer = None
rc = transfer.transfer[0].status
cbk_fn, setup_packet, _ = command
pkt = usb_core.RequestType(value=setup_packet.request_type)
duration = time.time() - self._time_start
if rc == TransferStatus.NO_DEVICE:
log.warning('device_removed')
if self.stop_code is None:
self.stop_code = DeviceEvent.COMMUNICATION_ERROR
if pkt.direction == 'out':
log.debug('ControlTransferAsync._finish rc=%d, duration=%.6f s', rc, duration)
else:
actual_length = transfer.transfer[0].actual_length
log.debug('ControlTransferAsync._finish rc=%d, duration=%.6f s, length: %s, %s',
rc, duration, setup_packet.length, actual_length)
buffer = bytes(transfer.buffer[8:(actual_length+8)])
response = usb_core.ControlTransferResponse(setup_packet, rc, buffer)
cbk_fn(response)
class EndpointIn:
def __init__(self, handle, pipe_id, transfers, block_size, data_fn, process_fn, stop_fn):
"""Manage an in endpoint.
:param handle: The device handle.
:param pipe_id: The endpoint IN pipe identifier.
:param transfers: The number of outstanding transfers to pend.
:param block_size: The size of each transfer in bytes.
:param data_fn: The function to call with the received endpoint IN data.
After the last block, data_fn is called with None to indicate the
            last transfer. The data_fn should normally return False, but may
            return True to stop the endpoint streaming.
:param process_fn: The function() called after data_fn was called.
This function can have more latency than data_fn.
:param stop_fn: The function(event, message) called when this endpoint
stops streaming data.
"""
self._handle = handle
self.pipe_id = pipe_id # (endpoint_id & 0x7f) | 0x80
self._config = {
'transfer_count': transfers,
            'transfer_size_bytes': ((block_size + 511) // 512) * 512  # round block_size up to a multiple of 512 bytes
}
self._data_fn = data_fn
self._process_fn = process_fn
self._stop_fn = stop_fn
self.stop_code = None
self.stop_message = ''
self._state = self.ST_IDLE
self._time_last = None
self._transfers_free = [] # Transfer
self._transfers_pending = [] # Transfer
self.transfers_processed = 0
self.transfer_count = 0
self.byte_count_window = 0
self.byte_count_total = 0
self._transfer_callback_fn = libusb_transfer_cb_fn(self._transfer_callback)
self._init()
ST_IDLE = 0
ST_RUNNING = 1
ST_STOPPING = 2
def __str__(self):
return 'EndpointIn(0x%02x)' % (self.pipe_id, )
def __len__(self):
return len(self._transfers_pending)
@property
def is_busy(self):
return 0 != len(self._transfers_pending)
def _transfer_pending_pop(self, transfer_void_ptr):
for idx in range(len(self._transfers_pending)):
if transfer_void_ptr == self._transfers_pending[idx].addr:
return self._transfers_pending.pop(idx)
log.warning('%s _transfer_pending_pop not found', self)
raise IOError('%s _transfer_pending_pop not found' % (self, ))
def _transfer_done(self):
if self.stop_code is None:
            log.warning('%s transfer_done but stop_code not set', self)
self.stop_code = 0
try:
self._data_fn(None)
except Exception:
log.exception('_data_fn exception: stop streaming')
try:
self._stop_fn(self.stop_code, self.stop_message)
except Exception:
log.exception('_stop_fn exception')
self._state = self.ST_IDLE
log.info('%s transfer_done %d: %s', self, self.stop_code, self.stop_message)
def _transfer_callback(self, transfer_void_ptr):
transfer = self._transfer_pending_pop(transfer_void_ptr)
self.transfer_count += 1
t = transfer.transfer[0]
try:
if self._state == self.ST_RUNNING:
if t.status == TransferStatus.COMPLETED:
self.byte_count_window += t.actual_length
self.byte_count_total += t.actual_length
self.transfers_processed += 1
buffer = transfer.buffer[:t.actual_length]
try:
rv = bool(self._data_fn(buffer))
except Exception:
log.exception('data_fn exception: stop streaming')
rv = True
if rv:
self._cancel(0, 'terminated by data_fn')
elif t.status == TransferStatus.TIMED_OUT:
log.warning('%s: timed out', self)
else:
msg = f'transfer callback with status {t.status}'
self._cancel(DeviceEvent.COMMUNICATION_ERROR, msg)
finally:
self._transfers_free.append(transfer)
self._pend()
if self._state == self.ST_STOPPING:
if 0 == len(self._transfers_pending):
self._transfer_done()
else:
log.debug('awaiting transfer completion')
def _init(self):
for i in range(self._config['transfer_count']):
transfer = Transfer(self._config['transfer_size_bytes'])
t = transfer.transfer[0]
t.dev_handle = self._handle
t.endpoint_id = self.pipe_id
t.endpoint_type = TransferType.BULK
t.callback = self._transfer_callback_fn
self._transfers_free.append(transfer)
def _pend(self):
while self._state == self.ST_RUNNING and len(self._transfers_free):
transfer = self._transfers_free.pop(0)
transfer.transfer[0].status = TransferStatus.COMPLETED
rv = _lib.libusb_submit_transfer(transfer.transfer)
if rv:
self._transfers_free.append(transfer)
if rv in [ReturnCodes.ERROR_BUSY]:
log.info('libusb_submit_transfer busy')
else: # no device, not supported, or other error
msg = f'libusb_submit_transfer => {rv}'
self._cancel(DeviceEvent.COMMUNICATION_ERROR, msg)
break # give system time to recover
else:
self._transfers_pending.append(transfer)
def _cancel(self, stop_code=None, stop_msg=None):
if self._state != self.ST_RUNNING:
return
if self.stop_code is None:
stop_code = 0 if stop_code is None else int(stop_code)
stop_msg = '' if stop_msg is None else str(stop_msg)
self.stop_code = stop_code
self.stop_message = stop_msg
lvl = logging.INFO if stop_code <= 0 else logging.ERROR
log.log(lvl, 'endpoint halt %d: %s', stop_code, stop_msg)
self._state = self.ST_STOPPING
log.info('%s cancel %d : %d', self, self.stop_code, len(self._transfers_pending))
for transfer in self._transfers_pending:
_lib.libusb_cancel_transfer(transfer.transfer)
# callbacks will be invoked later
def process_signal(self):
rv = False
if self.transfer_count and self._state == self.ST_RUNNING:
self.transfer_count = 0
try:
if callable(self._process_fn):
rv = bool(self._process_fn())
except Exception:
log.exception('_process_fn exception: stop streaming')
rv = True # force stop
if rv:
self._cancel(0, 'terminated by process_fn')
return rv
def start(self):
log.info("%s start transfer size = %d bytes" % (self, self._config['transfer_size_bytes']))
self.transfer_count = 0
self.byte_count_window = 0
self.byte_count_total = 0
self.stop_code = None
self.stop_message = None
self._state = self.ST_RUNNING
self._time_last = time.time()
self._pend()
time.sleep(0.0001)
def stop(self):
self._cancel(0, 'stop by method request')
def status(self):
"""Get the endpoint status.
        :return: A dict mapping each status name to a value. The value is a dict
            containing 'value' and 'units'.
"""
time_now = time.time()
duration = time_now - self._time_last
if duration < 0.01:
throughput = 0.0
else:
throughput = self.byte_count_window / duration
status = {
'bytes': {'value': self.byte_count_total, 'units': 'bytes'},
'transfers': {'value': self.transfer_count, 'units': 'transfers'},
'duration': {'value': duration, 'units': 'seconds'},
'throughput': {'value': throughput, 'units': 'Bps'},
}
self.byte_count_window = 0
self._time_last = time_now
return status
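# A hedged sketch (not used by the driver itself) showing how the EndpointIn
# callbacks fit together: data_fn receives each completed buffer (then None after
# the final transfer) and returns True only to stop streaming, process_fn runs
# lower-priority work, and stop_fn reports why streaming ended. The pipe id,
# transfer count and block size below are illustrative values.
def _example_endpoint_in(handle):
    frames = []
    def data_fn(buffer):
        if buffer is None:
            return False  # end-of-stream notification
        frames.append(bytes(buffer))
        return False  # returning True would stop the stream
    def process_fn():
        return False  # heavier processing may run here with more latency
    def stop_fn(stop_code, message):
        print('endpoint stopped %s: %s' % (stop_code, message))
    endpoint = EndpointIn(handle, 0x82, transfers=4, block_size=4096,
                          data_fn=data_fn, process_fn=process_fn, stop_fn=stop_fn)
    endpoint.start()
    return endpoint, frames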
def may_raise_ioerror(rv, msg):
if 0 != rv:
s = msg + (' [%d]' % (rv, ))
log.warning(s)
raise IOError(s)
else:
log.debug('%s: success', msg.split(' ')[0])
class LibUsbDevice:
"""The LibUSB :class:`usb.api.Device` implementation"""
def __init__(self, path):
self._ctx = None
self._path = path
self._handle = c_void_p(None)
self._endpoints = {}
self._control_transfer = None # type: ControlTransferAsync
self._removed = False
self._event_callback_fn = None
def __str__(self):
return f'Joulescope:{self.serial_number}'
@property
def serial_number(self):
return self._path.split('/')[-1]
def _open(self):
log.info('open: start %s', self._path)
self._ctx = _libusb_context_create()
descriptor = _libusb_device_descriptor()
devices = POINTER(c_void_p)()
vid, pid, serial_number = _path_split(self._path)
sz = _lib.libusb_get_device_list(self._ctx, pointer(devices))
try:
for idx in range(sz):
device = devices[idx]
if _lib.libusb_get_device_descriptor(device, pointer(descriptor)):
continue
if vid == descriptor.idVendor and pid == descriptor.idProduct:
dh = c_void_p(None)
rv = _lib.libusb_open(device, dh)
if rv < 0:
log.info('Could not open device: %04x/%04x', vid, pid)
continue
if serial_number == _get_string_descriptor(dh, descriptor.iSerialNumber):
self._handle = dh
log.info('open: success')
return
log.warning('open:failed')
raise IOError('open:failed')
finally:
_lib.libusb_free_device_list(devices, 0)
def open(self, event_callback_fn=None):
# todo support event_callback_fn on errors
        self.close()
        self._event_callback_fn = event_callback_fn
try:
self._open()
log.info('open: configure device')
rv = _lib.libusb_set_configuration(self._handle, 1)
may_raise_ioerror(rv, 'libusb_set_configuration 1 failed')
rv = _lib.libusb_claim_interface(self._handle, 0)
may_raise_ioerror(rv, 'libusb_claim_interface 0 failed')
rv = _lib.libusb_set_interface_alt_setting(self._handle, 0, 0)
may_raise_ioerror(rv, 'libusb_set_interface_alt_setting 0,0 failed')
self._control_transfer = ControlTransferAsync(self._handle)
log.info('open: done')
except IOError:
log.exception('open failed: %s', self._path)
self.close()
raise
except Exception as ex:
log.exception('open failed: %s', self._path)
self.close()
raise IOError(ex)
def _abort_endpoints(self):
waiting = []
if self._control_transfer is not None:
self._control_transfer.close()
waiting.append(self._control_transfer)
for endpoint in self._endpoints.values():
endpoint.stop()
waiting.append(endpoint)
time_start = time.time()
while True:
if all([not x.is_busy for x in waiting]):
break
dt = time.time() - time_start
if dt > 0.25 + TRANSFER_TIMEOUT_MS / 1000:
log.warning('Could not shut down gracefully')
break
timeval = TimeVal(tv_sec=0, tv_usec=25000)
_lib.libusb_handle_events_timeout(self._ctx, byref(timeval))
self._control_transfer = None
self._endpoints.clear()
def close(self, status=None, message=None):
log.info('close')
self._abort_endpoints()
if self._handle and not self._removed:
handle, self._handle = self._handle, c_void_p(None)
_lib.libusb_close(handle)
if self._ctx:
ctx, self._ctx = self._ctx, None
_libusb_context_destroy(ctx)
event_callback_fn, self._event_callback_fn = self._event_callback_fn, None
if status is not None and callable(event_callback_fn):
message = '' if message is None else str(message)
try:
event_callback_fn(status, message)
except Exception:
log.exception('while in _event_callback_fn')
def _control_transfer_pend(self, cbk_fn, setup_packet, data):
if self._control_transfer is None:
rsp = usb_core.ControlTransferResponse(setup_packet, TransferStatus.NO_DEVICE, None)
cbk_fn(rsp)
return False
return self._control_transfer.pend(cbk_fn, setup_packet, data)
def control_transfer_out(self, cbk_fn, recipient, type_, request, value=0, index=0, data=None):
if cbk_fn is None:
run_until_done = RunUntilDone(1.0, 'control_transfer_out')
self.control_transfer_out(run_until_done.cbk_fn, recipient, type_, request, value, index, data)
while not run_until_done.is_done():
self.process(0.01)
return run_until_done.value_args0
request_type = usb_core.RequestType(direction='out', type_=type_, recipient=recipient).u8
length = 0 if data is None else len(data)
setup_packet = usb_core.SetupPacket(request_type, request, value, index, length)
return self._control_transfer_pend(cbk_fn, setup_packet, data)
def control_transfer_in(self, cbk_fn, recipient, type_, request, value, index, length) -> ControlTransferResponse:
if cbk_fn is None:
run_until_done = RunUntilDone(1.0, 'control_transfer_in')
self.control_transfer_in(run_until_done.cbk_fn, recipient, type_, request, value, index, length)
while not run_until_done.is_done():
self.process(0.01)
return run_until_done.value_args0
request_type = usb_core.RequestType(direction='in', type_=type_, recipient=recipient).u8
setup_packet = usb_core.SetupPacket(request_type, request, value, index, length)
return self._control_transfer_pend(cbk_fn, setup_packet, None)
def read_stream_start(self, endpoint_id, transfers, block_size, data_fn, process_fn, stop_fn):
pipe_id = (endpoint_id & 0x7f) | 0x80
endpoint = self._endpoints.pop(pipe_id, None)
if endpoint is not None:
log.warning('repeated start')
endpoint.stop()
endpoint = EndpointIn(self._handle, pipe_id, transfers,
block_size, data_fn, process_fn, stop_fn)
self._endpoints[endpoint.pipe_id] = endpoint
endpoint.start()
def read_stream_stop(self, endpoint_id):
log.info('read_stream_stop %d', endpoint_id)
pipe_id = (endpoint_id & 0x7f) | 0x80
endpoint = self._endpoints.pop(pipe_id, None)
if endpoint is not None:
endpoint.stop()
def status(self):
e = {}
s = {'endpoints': e}
for endpoint in self._endpoints.values():
e[endpoint.pipe_id] = endpoint.status()
return s
def signal(self):
pass # todo, currently delays in process for up to 25 ms waiting for libusb_handle_events_timeout
def process(self, timeout=None):
if self._ctx and not self._removed:
timeval = TimeVal(tv_sec=0, tv_usec=25000)
_lib.libusb_handle_events_timeout(self._ctx, byref(timeval))
endpoints_stop = []
for endpoint in self._endpoints.values():
endpoint.process_signal()
if endpoint._state == endpoint.ST_IDLE:
endpoints_stop.append(endpoint.pipe_id)
for pipe_id in endpoints_stop:
self._endpoints.pop(pipe_id, None)
if self._control_transfer.stop_code:
msg = f'Control endpoint failed {self._control_transfer.stop_code}'
self.close(self._control_transfer.stop_code, msg)
else:
time.sleep(0.025)
class DeviceNotify:
def __init__(self, cbk):
self._cbk = cbk
self._window_thread = None
self._do_quit = True
if 0 == _lib.libusb_has_capability(Capability.HAS_HOTPLUG):
log.warning('libusb does not support hotplug')
return
self.open()
def _hotplug_cbk(self, ctx, device, ev, user_data):
inserted = bool(ev & HotplugEvent.DEVICE_ARRIVED)
removed = bool(ev & HotplugEvent.DEVICE_LEFT)
log.info('hotplug: inserted=%s, removed=%s', inserted, removed)
self._cbk(inserted, 0)
return 0
def _run(self):
if 0 == _lib.libusb_has_capability(Capability.HAS_HOTPLUG):
log.warning('libusb does not support hotplug')
# todo revert to polling method?
return
log.debug('_run_window start')
timeval = TimeVal(tv_sec=0, tv_usec=100000)
timeval_ptr = pointer(timeval)
handle = c_int()
cbk_fn = _libusb_hotplug_callback_fn(self._hotplug_cbk)
cbk_user_data = c_void_p()
with _libusb_context() as ctx:
rv = _lib.libusb_hotplug_register_callback(
ctx,
HotplugEvent.DEVICE_ARRIVED | HotplugEvent.DEVICE_LEFT,
0, # flags
HOTPLUG_MATCH_ANY, # vid
HOTPLUG_MATCH_ANY, # pid
HOTPLUG_MATCH_ANY, # device class
cbk_fn,
cbk_user_data,
byref(handle))
if rv:
raise IOError('could not register hotplug')
while not self._do_quit:
_lib.libusb_handle_events_timeout(ctx, timeval_ptr)
_lib.libusb_hotplug_deregister_callback(ctx, handle)
def open(self):
self.close()
self._do_quit = False
log.info('open')
self._window_thread = threading.Thread(name='device_notify', target=self._run)
self._window_thread.start()
def close(self):
if self._window_thread:
log.info('close')
self._do_quit = True
self._window_thread.join()
self._window_thread = None
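# A hedged sketch: subscribing to hotplug notifications with DeviceNotify. The
# callback receives (inserted, user_data) exactly as _hotplug_cbk invokes it;
# the print body is illustrative only.
def _example_device_notify():
    def on_hotplug(inserted, _user_data):
        print('hotplug event, inserted=%s' % (inserted, ))
    notify = DeviceNotify(on_hotplug)
    # call notify.close() during application shutdown to join the worker thread
    return notify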
def scan(name: str) -> List[LibUsbDevice]:
"""Scan for attached devices.
    :param name: The case-insensitive name of the device to scan for.
    :return: The list of discovered :class:`LibUsbDevice` instances.
"""
with _libusb_context() as ctx:
paths = []
infos = INFO[name.lower()]
descriptor = _libusb_device_descriptor()
devices = POINTER(c_void_p)()
sz = _lib.libusb_get_device_list(ctx, pointer(devices))
try:
for idx in range(sz):
device = devices[idx]
if _lib.libusb_get_device_descriptor(device, pointer(descriptor)):
raise RuntimeError('descriptor')
for info in infos:
vid = info['vendor_id']
pid = info['product_id']
if vid == descriptor.idVendor and pid == descriptor.idProduct:
dh = c_void_p(None)
rv = _lib.libusb_open(device, pointer(dh))
if rv < 0 or not dh:
log.info('Could not open device: %04x/%04x', vid, pid)
continue
try:
serial_number = _get_string_descriptor(dh, descriptor.iSerialNumber)
finally:
_lib.libusb_close(dh)
path = '%04x/%04x/%s' % (vid, pid, serial_number)
paths.append(path)
finally:
_lib.libusb_free_device_list(devices, 0)
if not len(paths):
log.info('scan found no devices')
return []
log.info('scan found %s' % paths)
devices = [LibUsbDevice(p) for p in paths]
return devices
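# A hedged end-to-end sketch of the LibUsbDevice streaming lifecycle built on the
# API above: scan, open, start an IN stream, pump events with process(), then
# stop and close. The endpoint id, transfer sizes and duration are hypothetical.
def _example_stream(duration=1.0):
    found = scan('Joulescope')
    if not found:
        return None
    device = found[0]
    device.open()
    chunks = []
    def data_fn(buffer):
        if buffer is not None:
            chunks.append(bytes(buffer))
        return False  # keep streaming
    def stop_fn(stop_code, message):
        print('stream stopped %s: %s' % (stop_code, message))
    device.read_stream_start(2, transfers=4, block_size=4096,
                             data_fn=data_fn, process_fn=lambda: False, stop_fn=stop_fn)
    t_end = time.time() + duration
    while time.time() < t_end:
        device.process(0.01)
    device.read_stream_stop(2)
    device.close()
    return chunks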
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
devices = scan('Joulescope')
print('\n'.join([str(d) for d in devices]))
if len(devices):
d = devices[0]
d.open()
        rv = d.control_transfer_in(None, 'device', 'vendor', request=4,
                                   value=0, index=0, length=128)
print(rv)
d.close()
|
test_pastebin_plugin.py
|
import logging
import re
import socket
import threading
import time
import tkinter
import traceback
from http.client import RemoteDisconnected
import pytest
import requests
from pygments.lexers import PythonLexer, TextLexer, get_lexer_by_name
from porcupine import get_main_window, utils
from porcupine.plugins.pastebin import DPaste, SuccessDialog, Termbin
# utils.run_in_thread() can make tests fragile
@pytest.fixture
def dont_run_in_thread(monkeypatch):
def func(blocking_function, done_callback, check_interval_ms=69, daemon=True):
try:
result = blocking_function()
except Exception:
done_callback(False, traceback.format_exc())
else:
done_callback(True, result)
monkeypatch.setattr(utils, "run_in_thread", func)
@pytest.mark.pastebin_test
def test_dpaste_syntax_choices():
# download the json data representing valid syntax choices linked from dpaste docs
response = requests.get("https://dpaste.com/api/v2/syntax-choices/")
response.raise_for_status()
syntax_choices = response.json()
# Skip 'json-object', it's wrong for whatever reason
del syntax_choices["json-object"]
for syntax_choice in syntax_choices.keys():
assert syntax_choice == get_lexer_by_name(syntax_choice).aliases[0]
@pytest.mark.pastebin_test
@pytest.mark.parametrize("paste_class", [DPaste, Termbin])
def test_pastebin(paste_class):
some_code = "import foo as bar\nprint('baz')"
for lexer in [TextLexer, PythonLexer]:
url = paste_class().run(some_code, lexer)
assert isinstance(url, str)
response = requests.get(url)
response.raise_for_status()
if response.text.strip().startswith("<!DOCTYPE"):
# html and regexes ftw
            assert some_code in re.sub(r"<.*?>", "", response.text).replace("&#x27;", "'")
else:
# raw url
assert response.text.strip() == some_code.strip()
@pytest.mark.pastebin_test # TODO: switch to localhost HTTPS server?
def test_dpaste_canceling(monkeypatch):
monkeypatch.setattr("porcupine.plugins.pastebin.DPASTE_URL", "https://httpbin.org/delay/3")
paste = DPaste()
got_error = False
def thread_target():
nonlocal got_error
try:
paste.run("hello world", TextLexer)
except RemoteDisconnected: # the error that it raises when canceled
got_error = True
thread = threading.Thread(target=thread_target)
thread.start()
start = time.time()
time.sleep(1)
paste.cancel()
thread.join()
assert time.time() - start < 1.05
assert got_error
def test_success_dialog(mocker):
dialog = SuccessDialog("http://example.com/poop")
dialog.clipboard_append("this junk should be gone soon")
dialog.copy_to_clipboard()
assert dialog.clipboard_get() == "http://example.com/poop"
# make sure that webbrowser.open is called
mock = mocker.patch("porcupine.plugins.pastebin.webbrowser")
assert dialog.winfo_exists()
dialog.open_in_browser()
assert not dialog.winfo_exists()
mock.open.assert_called_once_with("http://example.com/poop")
dialog.destroy()
def test_lots_of_stuff_with_localhost_termbin(filetab, monkeypatch, tabmanager, dont_run_in_thread):
with socket.socket() as termbin:
termbin.settimeout(5)
termbin.bind(("localhost", 0))
termbin.listen(1)
monkeypatch.setattr(
"porcupine.plugins.pastebin.TERMBIN_HOST_AND_PORT", termbin.getsockname()
)
thread_done = False
fake_wait_window_done = False
def fake_termbin():
with termbin.accept()[0] as sock:
assert sock.recv(1024) == b"hello world\n"
sock.sendall(b"http://example.com/\n\0")
nonlocal thread_done
thread_done = True
thread = threading.Thread(target=fake_termbin)
thread.start()
tabmanager.select(filetab)
filetab.textwidget.insert("end", "hello world\n")
def fake_wait_window(success_dialog):
assert success_dialog.title() == "Pasting Succeeded"
assert success_dialog.url == "http://example.com/"
success_dialog.destroy()
nonlocal fake_wait_window_done
fake_wait_window_done = True
monkeypatch.setattr(tkinter.Toplevel, "wait_window", fake_wait_window)
get_main_window().event_generate("<<Menubar:Pastebin/termbin.com>>")
thread.join()
get_main_window().update()
assert thread_done and fake_wait_window_done
def test_paste_error_handling(monkeypatch, caplog, mocker, tabmanager, filetab, dont_run_in_thread):
monkeypatch.setattr("porcupine.plugins.pastebin.DPASTE_URL", "ThisIsNotValidUrlStart://wat")
mocker.patch("tkinter.messagebox.showerror")
tabmanager.select(filetab)
get_main_window().event_generate("<<Menubar:Pastebin/dpaste.com>>")
tkinter.messagebox.showerror.assert_called_once_with(
"Pasting failed", "Check your internet connection or try a different pastebin."
)
def test_invalid_return(filetab, tabmanager, mocker, caplog):
mocker.patch("tkinter.messagebox.showerror")
mocker.patch("porcupine.plugins.pastebin.DPaste.run").return_value = "lol"
tabmanager.select(filetab)
get_main_window().event_generate("<<Menubar:Pastebin/dpaste.com>>")
get_main_window().update()
tkinter.messagebox.showerror.assert_called_once_with(
"Pasting failed", "Instead of a valid URL, dpaste.com returned 'lol'."
)
assert caplog.record_tuples == [
(
"porcupine.plugins.pastebin",
logging.ERROR,
"pastebin 'dpaste.com' returned invalid url: 'lol'",
)
]
|
__init__.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, division, print_function, absolute_import
import random
import os
import json
import testdata
from unittest import TestSuite
from endpoints.compat import *
from endpoints.client import WebClient, WebsocketClient
from .. import TestCase as BaseTestCase
class TestCase(BaseTestCase):
server = None
server_class = None # this should be a client.Server class
client_class = WebClient
def setUp(self):
if self.server:
self.server.stop()
def tearDown(self):
if self.server:
self.server.stop()
def create_server(self, contents, config_contents='', **kwargs):
tdm = testdata.create_module(kwargs.get("controller_prefix", ""), contents)
kwargs["controller_prefix"] = tdm
kwargs["host"] = self.get_host()
kwargs["cwd"] = tdm.basedir
if config_contents:
config_path = testdata.create_file("{}.py".format(testdata.get_module_name()), config_contents)
kwargs["config_path"] = config_path
server = self.server_class(**kwargs)
server.stop()
server.start()
self.server = server
return server
def create_client(self, **kwargs):
kwargs.setdefault("host", self.server.host)
client = self.client_class(**kwargs)
return client
class WebTestCase(TestCase):
def test_body_plain_with_content_type(self):
server = self.create_server(contents=[
"from endpoints import Controller",
"class Default(Controller):",
" def POST(self, **kwargs):",
" self.response.headers['content-type'] = 'text/plain'",
" return self.request.body.read()",
])
body = "plain text body"
c = self.create_client(headers={"content-type": "text/plain"})
r = c.post("/", body)
self.assertEqual(200, r.code)
self.assertEqual(body, r.body)
self.assertEqual(String(body), String(r._body))
def test_body_plain_without_content_type(self):
server = self.create_server(contents=[
"from endpoints import Controller",
"class Default(Controller):",
" def POST(self, **kwargs):",
" self.response.headers['content-type'] = 'text/plain'",
" return self.request.body.read()",
])
body = "plain text body"
#c = self.create_client(headers={"content-type": "text/plain"})
c = self.create_client()
r = c.post("/", body)
self.assertEqual(200, r.code)
self.assertEqual(body, r.body)
self.assertEqual(String(body), String(r._body))
def test_body_json_dict(self):
server = self.create_server(contents=[
"from endpoints import Controller",
"class Default(Controller):",
" def POST(self, *args, **kwargs):",
" return {'args': args, 'kwargs': kwargs}",
])
c = self.create_client(json=True)
body = {"foo": 1, "bar": [2, 3], "che": "four"}
r = c.post("/", body)
self.assertEqual(body, r._body["kwargs"])
self.assertEqual(0, len(r._body["args"]))
def test_body_json_list(self):
server = self.create_server(contents=[
"from endpoints import Controller",
"class Default(Controller):",
" def POST(self, *args, **kwargs):",
" return {'args': args, 'kwargs': kwargs}",
])
c = self.create_client(json=True)
body = ["foo", "bar"]
r = c.post("/", body)
self.assertEqual(body, r._body["args"])
self.assertEqual(0, len(r._body["kwargs"]))
body = [{"foo": 1}, {"foo": 2}]
r = c.post("/", body)
self.assertEqual(1, r._body["args"][0]["foo"])
self.assertEqual(2, len(r._body["args"]))
def test_body_file_1(self):
filepath = testdata.create_file("filename.txt", "this is a text file to upload")
server = self.create_server(contents=[
"from endpoints import Controller",
"class Default(Controller):",
" def POST(self, *args, **kwargs):",
" return kwargs['file'].filename",
"",
])
c = self.create_client()
r = c.post_file('/', {"foo": "bar", "baz": "che"}, {"file": filepath})
self.assertEqual(200, r.code)
self.assertTrue("filename.txt" in r.body)
def test_body_file_2(self):
"""make sure specifying a @param for the file upload works as expected"""
filepath = testdata.create_file("post_file_with_param.txt", "post_file_with_param")
server = self.create_server(contents=[
"from endpoints import Controller, decorators",
"class Default(Controller):",
" @decorators.param('file')",
" def POST(self, *args, **kwargs):",
" return kwargs['file'].filename",
#" return kwargs['file']['filename']",
"",
])
c = self.create_client()
r = c.post_file('/', {"foo": "bar", "baz": "che"}, {"file": filepath})
self.assertEqual(200, r.code)
self.assertTrue("post_file_with_param.txt" in r.body)
def test_request_url(self):
"""make sure request url gets controller_path correctly"""
server = self.create_server(contents=[
"from endpoints import Controller",
"class Requrl(Controller):",
" def GET(self):",
" return self.request.url.controller()",
"",
])
c = self.create_client()
r = c.get('/requrl')
self.assertTrue("/requrl" in r._body)
def test_list_param_decorator(self):
server = self.create_server(contents=[
"from endpoints import Controller, decorators",
"class Listparamdec(Controller):",
" @decorators.param('user_ids', 'user_ids[]', type=int, action='append_list')",
" def GET(self, **kwargs):",
" return int(''.join(map(str, kwargs['user_ids'])))",
""
])
c = self.create_client()
r = c.get('/listparamdec?user_ids[]=12&user_ids[]=34')
self.assertEqual("1234", r.body)
def test_post_basic(self):
server = self.create_server(contents=[
"from endpoints import Controller",
"from endpoints.decorators import version",
"class Default(Controller):",
" def GET(*args, **kwargs): pass",
" @version('', 'v1')",
" def POST_v1(*args, **kwargs): pass",
" @version('v2')",
" def POST_v2(*args, **kwargs): return kwargs['foo']",
"",
])
c = self.create_client()
r = c.post(
'/',
{"foo": "bar"},
headers={
"content-type": "application/json",
"Accept": "application/json;version=v2"
}
)
self.assertEqual(200, r.code)
self.assertEqual('"bar"', r.body)
r = c.post('/', {})
self.assertEqual(204, r.code)
r = c.post('/', None, headers={"content-type": "application/json"})
self.assertEqual(204, r.code)
r = c.post('/', None)
self.assertEqual(204, r.code)
r = c.post('/', {}, headers={"content-type": "application/json"})
self.assertEqual(204, r.code)
r = c.post('/', {"foo": "bar"}, headers={"Accept": "application/json;version=v2"})
self.assertEqual(200, r.code)
self.assertEqual('"bar"', r.body)
def test_404_request(self):
server = self.create_server(contents=[
"from endpoints import Controller",
"class Foo(Controller):",
" def GET(self, **kwargs): pass",
"",
])
c = self.create_client()
r = c.get('/foo/bar/baz?che=1&boo=2')
self.assertEqual(404, r.code)
def test_response_headers(self):
server = self.create_server(contents=[
"from endpoints import Controller",
"class Default(Controller):",
" def GET(self):",
" self.response.set_header('FOO_BAR', 'check')",
"",
])
c = self.create_client()
r = c.get('/')
self.assertEqual(204, r.code)
self.assertTrue("foo-bar" in r.headers)
def test_file_stream(self):
content = "this is a text file to stream"
filepath = testdata.create_file("filename.txt", content)
server = self.create_server(contents=[
"from endpoints import Controller",
"class Default(Controller):",
" def GET(self, *args, **kwargs):",
" f = open('{}')".format(filepath),
" self.response.set_header('content-type', 'text/plain')",
" return f",
"",
])
c = self.create_client()
r = c.get('/')
self.assertEqual(200, r.code)
self.assertEqual(content, r.body)
#self.assertTrue(r.body)
def test_generators(self):
server = self.create_server(contents=[
"from endpoints import Controller",
"class Default(Controller):",
" def GET(self):",
" for x in range(100):",
" yield x",
])
c = self.create_client()
r = c.get('/')
content = list(range(100))
self.assertEqual(200, r.code)
self.assertEqual(content, r._body)
def test_response_body_1(self):
server = self.create_server(contents=[
"from endpoints import Controller",
"class Default(Controller):",
" def POST(self, **kwargs):",
" content_type = '{};charset={}'.format(kwargs['content_type'], self.encoding)",
" self.response.set_header('content-type', content_type)",
" return kwargs['body']",
])
body = {'foo': testdata.get_words()}
c = self.create_client(json=True)
r = c.post('/', {'content_type': 'plain/text', 'body': body})
self.assertEqual(ByteString(body), r._body)
self.assertEqual(String(body), r.body)
r = c.post('/', {'content_type': 'application/json', 'body': body})
self.assertEqual(json.dumps(body), r.body)
r = c.post('/', {'content_type': 'application/json', 'body': {}})
self.assertEqual("{}", r.body)
def test_response_body_json_error(self):
"""I was originally going to have the body method smother the error, but
after thinking about it a little more, I think it is better to bubble up
the error and rely on the user to handle it in their code"""
        # 1-13-2021 update: it turns out the response body error is buried. An
        # error is raised, but the server returns a 200 because the headers are
        # already sent before the body is encoded, so all the headers go out and
        # the body is empty
self.skip_test("")
server = self.create_server(contents=[
"from endpoints import Controller",
"class Default(Controller):",
" def GET(self):",
" class Foo(object): pass",
" return {'foo': Foo()}",
])
c = self.create_client()
r = c.get('/')
pout.v(r.code, r.body)
return
self.skip_test("moved from http.ResponseTest, make this work at some point")
class Foo(object): pass
b = {'foo': Foo()}
r = Response()
r.headers['Content-Type'] = 'application/json'
r.body = b
with self.assertRaises(TypeError):
rb = r.body
class WebsocketTestCase(TestCase):
client_class = WebsocketClient
server_class = None
def test_bad_path(self):
"""https://github.com/Jaymon/endpoints/issues/103"""
server = self.create_server(contents=[
"import os",
"from endpoints import Controller, decorators",
"class Default(Controller):",
" def CONNECT(self, **kwargs):",
" pass",
" def DISCONNECT(self, **kwargs):",
" pass",
" def GET(self, foo, bar):",
" return 'get'",
"",
])
c = self.create_client()
c.connect()
r = c.get("http://example.com/foo/bar")
self.assertEqual(404, r.code)
r = c.get("/foo/bar")
self.assertEqual(200, r.code)
def test_close_connection(self):
server = self.create_server(contents=[
"from endpoints import Controller, CloseConnection",
"class Default(Controller):",
" def CONNECT(self, **kwargs):",
" pass",
" def DISCONNECT(self, **kwargs):",
" pass",
" def GET(self, **kwargs):",
" raise CloseConnection()",
])
c = self.create_client()
c.connect()
with self.assertRaises(RuntimeError):
c.get("/", timeout=0.1, attempts=1)
    def test_rapid_requests(self):
        """We were dropping requests when making a whole bunch of websocket
        requests all at once. A version of this test was able to duplicate it about
        every 5 or 6 runs (dang async programming), which allowed me to figure out
        that uwsgi batches ws requests: if you don't read them all, it will
        silently discard the unread ones when another request is received."""
server = self.create_server(contents=[
"from endpoints import Controller",
"class Default(Controller):",
" def CONNECT(self, **kwargs):",
" pass",
" def DISCONNECT(self, **kwargs):",
" pass",
" def GET(self, **kwargs):",
" return kwargs['pid']",
])
c = self.create_client()
c.connect()
# we are basically going to do Y sets of X requests, if any of them
# stall then we failed this test, otherwise we succeeded
for y in range(5):
ts = []
rs = []
for x in range(5):
def target(x):
r = c.get("/", {"pid": x})
rs.append(int(r.body))
t = testdata.Thread(target=target, args=[x])
t.start()
ts.append(t)
for t in ts:
t.join()
self.assertEqual(set([0, 1, 2, 3, 4]), set(rs))
    def test_path_mixup(self):
        """Jarid was hitting this problem. We were only able to get it to happen
        consistently with his environment; the problem stemmed from one request
        being remembered on the next request. This makes sure that is fixed."""
server = self.create_server(contents=[
"from endpoints import Controller",
"from endpoints.decorators import version",
"class Default(Controller):",
" def CONNECT(self, **kwargs):",
" pass",
" def DISCONNECT(self, **kwargs):",
" pass",
" def GET(*args, **kwargs):"
" return 'Default.GET'",
"",
"class Foo(Controller):",
" def POST(*args, **kwargs):",
" return 'Foo.POST'",
])
c = self.create_client()
c.connect()
r = c.post("/foo")
self.assertEqual(200, r.code)
self.assertTrue("Foo.POST" in r.body)
def test_versioning(self):
server = self.create_server(contents=[
"from endpoints import Controller",
"from endpoints.decorators import version",
"class Default(Controller):",
" def CONNECT(self, **kwargs):",
" pass",
" def DISCONNECT(self, **kwargs):",
" pass",
" @version('', 'v1')",
" def GET_v1(*args, **kwargs): return 'v1'",
" @version('v2')",
" def GET_v2(*args, **kwargs): return 'v2'",
])
c = self.create_client()
c.connect()
r = c.get(
'/',
headers={
"Accept": "application/json;version=v1"
}
)
self.assertEqual(200, r.code)
self.assertTrue("v1" in r.body)
r = c.get(
'/',
headers={
"Accept": "application/json;version=v2"
}
)
self.assertEqual(200, r.code)
self.assertTrue("v2" in r.body)
r = c.get('/')
self.assertEqual(200, r.code)
self.assertTrue("v1" in r.body)
def test_connect_on_fetch(self):
server = self.create_server(contents=[
"from endpoints import Controller, CallError",
"class Confetch(Controller):",
" def CONNECT(self, **kwargs):",
" if int(kwargs['foo']) != 1:",
" raise CallError(400)",
" def GET(self, **kwargs):",
" pass",
])
c = self.create_client()
r = c.get("/confetch", {"foo": 1, "bar": 2})
self.assertEqual(204, r.code)
c = self.create_client()
with self.assertRaises(RuntimeError):
r = c.get("/confetch", {"foo": 2})
def test_get_fetch_host(self):
client_cls = self.client_class
c = client_cls("http://localhost")
self.assertTrue(c.get_fetch_host().startswith("ws"))
c = client_cls("https://localhost")
self.assertTrue(c.get_fetch_host().startswith("wss"))
c = client_cls("HTTPS://localhost")
self.assertTrue(c.get_fetch_host().startswith("wss"))
c = client_cls("HTTP://localhost")
self.assertTrue(c.get_fetch_host().startswith("ws"))
def test_connect_success(self):
server = self.create_server(contents=[
"from endpoints import Controller",
"class Default(Controller):",
" def CONNECT(self, *args, **kwargs):",
" pass",
" def DISCONNECT(self, *args, **kwargs):",
" pass",
])
c = self.create_client()
r = c.connect(trace=True)
self.assertEqual(204, r.code)
self.assertTrue(c.connected)
# when looking at logs, this test looks like there is a problem because
# right after connection an IOError is thrown, that's because the close
# will cause uWSGI to raise an IOError, giving the websocket a chance
# to clean up the connection
c.close()
self.assertFalse(c.connected)
def test_connect_failure(self):
server = self.create_server(contents=[
"from endpoints import Controller, CallError",
"class Default(Controller):",
" def CONNECT(self, *args, **kwargs):",
" raise CallError(401, 'this is the message')",
" def DISCONNECT(self, *args, **kwargs):",
" pass",
])
c = self.create_client()
with self.assertRaises(IOError):
c.connect()
def test_request_basic(self):
server = self.create_server(contents=[
"from endpoints import Controller",
"",
"class Default(Controller):",
" def CONNECT(self, *args, **kwargs):",
" #pout.v(args, kwargs, self.request)",
" #pout.b('CONNECT')",
" pass",
" def DISCONNECT(self, *args, **kwargs):",
" #pout.b('DISCONNECT')",
" pass",
"",
" def SOCKET(self, *args, **kwargs):",
" #pout.v(args, kwargs)",
" #pout.b('SOCKET')",
" return {",
" 'name': 'SOCKET',",
" 'args': args,",
" 'kwargs': kwargs,",
" }",
" def POST(self, *args, **kwargs):",
" return {",
" 'name': 'POST',",
" 'args': args,",
" 'kwargs': kwargs,",
" }",
" def GET(self, *args, **kwargs):",
" return {",
" 'name': 'GET',",
" 'args': args,",
" 'kwargs': kwargs,",
" }",
])
c = self.create_client()
r = c.post("/foo/bar", {"val1": 1, "val2": 2})
self.assertEqual("POST", r._body["name"])
r = c.send("/foo/bar", {"val1": 1, "val2": 2})
self.assertEqual("SOCKET", r._body["name"])
self.assertEqual({"val1": 1, "val2": 2}, r._body["kwargs"])
r = c.get("/foo/bar", {"val1": 1, "val2": 2})
self.assertEqual("GET", r._body["name"])
def test_request_modification(self):
server = self.create_server(contents=[
"from endpoints import Controller",
"",
"class Default(Controller):",
" def CONNECT(self):",
" self.request.foo = 1",
"",
" def DISCONNECT(self, *args, **kwargs):",
" pass",
"",
" def POST(self, **kwargs):",
" self.request.parent.foo = kwargs['foo']",
" return self.request.parent.foo",
"",
" def GET(self):",
" return self.request.foo",
])
c = self.create_client()
r = c.post("/", {"foo": 2})
self.assertEqual(2, r._body)
r = c.get("/")
self.assertEqual(2, r._body)
r = c.post("/", {"foo": 4})
self.assertEqual(4, r._body)
r = c.get("/")
self.assertEqual(4, r._body)
def test_path_autoconnect(self):
server = self.create_server(contents=[
"from endpoints import Controller",
"",
"class Foo(Controller):",
" def CONNECT(self): pass",
" def DISCONNECT(self): pass",
"",
" def POST(self, **kwargs):",
" return 1",
"",
])
c = self.create_client()
c.basic_auth("foo", "bar")
r = c.post("/foo", {"bar": 2})
self.assertEqual(1, r._body)
def test_error_500(self):
server = self.create_server(contents=[
"from endpoints import Controller",
"",
"class Default(Controller):",
" def CONNECT(self, *args, **kwargs): pass",
" def DISCONNECT(self, *args, **kwargs): pass",
" def GET(self):",
" raise ValueError('bah')",
" def POST(self, **kwargs):",
" return 'foo'",
])
c = self.create_client()
r = c.get("/")
self.assertEqual(500, r.code)
self.assertEqual("bah", r._body["errmsg"])
r = c.post("/")
self.assertEqual(200, r.code)
def test_call_error(self):
server = self.create_server(contents=[
"from endpoints import Controller, CallError",
"",
"class Default(Controller):",
" def CONNECT(self, *args, **kwargs): pass",
" def DISCONNECT(self, *args, **kwargs): pass",
"",
" def GET(self):",
" raise CallError(401)",
])
c = self.create_client()
r = c.get("/")
self.assertEqual(401, r.code)
def test_connect_error(self):
server = self.create_server(contents=[
"from endpoints import Controller, CallError",
"from endpoints.decorators.auth import client_auth as BaseAuth",
"class client_auth(BaseAuth):",
" def target(self, *args, **kwargs): return False",
"",
"class Default(Controller):",
" def CONNECT(self, *args, **kwargs):",
" auth = client_auth()",
" auth.handle_target(self.request, args, kwargs)",
"",
" def DISCONNECT(self, *args, **kwargs): pass",
"",
])
c = self.create_client()
with self.assertRaises(IOError):
c.connect()
#r = c.get("/")
def test_get_query(self):
server = self.create_server(contents=[
"from endpoints import Controller",
"",
"class Default(Controller):",
" def CONNECT(self, *args, **kwargs): pass",
" def DISCONNECT(self, *args, **kwargs): pass",
"",
" def GET(self, **kwargs):",
" return kwargs['foo']",
])
c = self.create_client()
r = c.get("/", {"foo": 2})
self.assertEqual(200, r.code)
self.assertEqual(2, r._body)
def test_count(self):
self.skip_test("count is no longer being sent down")
server = self.create_server(contents=[
"from endpoints import Controller",
"",
"class Default(Controller):",
" def CONNECT(self, *args, **kwargs): pass",
" def DISCONNECT(self, *args, **kwargs): pass",
"",
" def GET(self, **kwargs): pass",
" def POST(self, **kwargs): pass",
])
c = self.create_client()
for x in range(2, 7):
r = getattr(c, random.choice(["get", "post"]))("/")
self.assertEqual(204, r.code)
self.assertEqual(x, r.count)
c.close()
for x in range(2, 7):
r = getattr(c, random.choice(["get", "post"]))("/")
self.assertEqual(204, r.code)
self.assertEqual(x, r.count)
class WebServerTestCase(TestCase):
"""Tests the client.Webserver for the interface"""
def test_start(self):
server = self.create_server(contents=[
"from endpoints import Controller",
"class Default(Controller):",
" def GET(self): pass",
"",
])
def test_file(self):
server = self.create_server(
contents=[
"import os",
"from endpoints import Controller, decorators",
"class Default(Controller):",
" def GET(self):",
" return os.environ['WSGI_TESTING']",
"",
],
config_contents=[
"import os",
"os.environ['WSGI_TESTING'] = 'foo bar'",
"",
]
)
c = self.create_client()
r = c.get("/")
self.assertEqual(200, r.code)
self.assertEqual("foo bar", r._body)
def load_tests(*args, **kwargs):
return TestSuite()
|
try_threading.py
|
"""
A basic example of how to use threading to generate images.
This gives only a small improvement and I'm still trying
to figure out why.
"""
import queue
import threading
from mandelpy import create_image, Settings, power
from PIL import ImageFilter
images_folder = r"..\images\increasing_powers5"
video_file = r"..\test5.mp4"
# The queue for tasks
q = queue.Queue()
# Worker, handles each task
def worker():
while True:
i = q.get()
if i is None:
break
n = 1 + i/50
p = (n - 2) * abs(n - 2) + 2
settings = Settings(transform=lambda z: power(z, p) + 1.5 * z - 0.5 - 0.25j,
width=2000, height=2000,
block_size=(1000, 1000),
mirror_x=False)
img = create_image(settings, verbose=True)
img = img.filter(ImageFilter.GaussianBlur(1))
img = img.resize((1920, 1080))
img.save(rf"{images_folder}\Pic{i}.jpg", optimize=True, quality=90)
q.task_done()
def start_workers(worker_pool=1000):
threads = []
for i in range(worker_pool):
t = threading.Thread(target=worker)
t.start()
threads.append(t)
return threads
def stop_workers(threads):
# stop workers
for i in threads:
q.put(None)
for t in threads:
t.join()
def create_queue(task_items):
for item in task_items:
q.put(item)
if __name__ == "__main__":
# Dummy tasks
tasks = [item for item in range(100)]
# Start up your workers
workers = start_workers(worker_pool=2)
create_queue(tasks)
# Blocks until all tasks are complete
q.join()
stop_workers(workers)
|
hand_detector_utils.py
|
#Victor Dibia, HandTrack: A Library For Prototyping Real-time Hand Tracking Interfaces using Convolutional Neural Networks,
#https://github.com/victordibia/handtracking
#Apache Licence. Copyright (c) 2017 Victor Dibia.
# Utilities for object detector.
import numpy as np
import sys
import tensorflow as tf
import os
from threading import Thread
from datetime import datetime
import cv2
from handdetector import label_map_util
from collections import defaultdict
detection_graph = tf.Graph()
#sys.path.append("..")
# score threshold for showing bounding boxes.
_score_thresh = 0.27
MODEL_NAME = 'handdetector/hand_inference_graph'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = 'handdetector/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = 'handdetector/hand_label_map.pbtxt'
NUM_CLASSES = 1
# load label map
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(
label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# Load a frozen inference graph into memory
def load_inference_graph():
# load frozen tensorflow model into memory
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
sess = tf.Session(graph=detection_graph)
print("SSD hand detector frozen graph loaded.")
return detection_graph, sess
# draw the detected bounding boxes on the images
# You can modify this to also draw a label.
def draw_box_on_image(num_hands_detect, score_thresh, scores, boxes, im_width, im_height, image_np):
for i in range(num_hands_detect):
if (scores[i] > score_thresh):
(left, right, top, bottom) = (boxes[i][1] * im_width, boxes[i][3] * im_width,
boxes[i][0] * im_height, boxes[i][2] * im_height)
p1 = (int(left), int(top))
p2 = (int(right), int(bottom))
cv2.rectangle(image_np, p1, p2, (77, 255, 9), 3, 1)
# Show fps value on image.
def draw_fps_on_image(fps, image_np):
cv2.putText(image_np, fps, (20, 50),
cv2.FONT_HERSHEY_SIMPLEX, 0.75, (77, 255, 9), 2)
# Actual detection .. generate scores and bounding boxes given an image
def detect_objects(image_np, detection_graph, sess):
# Definite input and output Tensors for detection_graph
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
detection_boxes = detection_graph.get_tensor_by_name(
'detection_boxes:0')
    # Each score represents the level of confidence for each of the objects.
    # The score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name(
'detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name(
'detection_classes:0')
num_detections = detection_graph.get_tensor_by_name(
'num_detections:0')
image_np_expanded = np.expand_dims(image_np, axis=0)
(boxes, scores, classes, num) = sess.run(
[detection_boxes, detection_scores,
detection_classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
return np.squeeze(boxes), np.squeeze(scores)
# Code to thread reading camera input.
# Source : Adrian Rosebrock
# https://www.pyimagesearch.com/2017/02/06/faster-video-file-fps-with-cv2-videocapture-and-opencv/
class WebcamVideoStream:
def __init__(self, src, width, height):
# initialize the video camera stream and read the first frame
# from the stream
self.stream = cv2.VideoCapture(src)
self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width)
self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
(self.grabbed, self.frame) = self.stream.read()
# initialize the variable used to indicate if the thread should
# be stopped
self.stopped = False
def start(self):
# start the thread to read frames from the video stream
Thread(target=self.update, args=()).start()
return self
def update(self):
# keep looping infinitely until the thread is stopped
while True:
# if the thread indicator variable is set, stop the thread
if self.stopped:
return
# otherwise, read the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
def read(self):
# return the frame most recently read
return self.frame
def size(self):
# return size of the capture device
return self.stream.get(3), self.stream.get(4)
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
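# A hedged usage sketch (not part of the original utilities): a minimal capture
# and detection loop built from the helpers above. The camera index, frame size,
# window name and hand count are illustrative values.
def _example_detection_loop(max_hands=2):
    graph, sess = load_inference_graph()
    stream = WebcamVideoStream(src=0, width=640, height=480).start()
    im_width, im_height = stream.size()
    try:
        while True:
            frame = stream.read()
            if frame is None:
                continue
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            boxes, scores = detect_objects(rgb, graph, sess)
            draw_box_on_image(max_hands, _score_thresh, scores, boxes,
                              im_width, im_height, frame)
            cv2.imshow('hand detection', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        stream.stop()
        cv2.destroyAllWindows()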
|
gamestate.py
|
import discord
import random
import time
import names
import re
import math
import asyncio
from statemachine import StateMachine, State
from threading import Thread
from mafia.misc.utils import Misc, Timers, Alignment
class DelayedOperation(Thread):
TIME_MESSAGE_BEFORE_END = 10
MSG_TEMPLATE = "*{}: {} secondes restantes...*"
def __init__(self, timer, operation, title: str = None, send_message_fnc=None):
"""
Initializer
:param timer: a duration before operation, in seconds
:param operation: a function to execute after the timer
:param title: string displayed as timer name
:param send_message_fnc: function used to send a message to everyone
"""
super().__init__()
self._timer = timer
self._operation = operation
self._title = title
self._send_message_fnc = send_message_fnc
self._disabled = False
def run(self):
"""
Run thread
"""
if self._title is not None and \
self._send_message_fnc is not None and \
self._timer < self.TIME_MESSAGE_BEFORE_END:
self._send_message_fnc(self.MSG_TEMPLATE.format(self._title, int(self._timer)))
init_time = time.time()
while time.time() - init_time < self._timer and not self._disabled:
time.sleep(1.0)
if self._title is not None and self._send_message_fnc is not None:
if int(self._timer - round(time.time() - init_time)) == self.TIME_MESSAGE_BEFORE_END:
self._send_message_fnc(self.MSG_TEMPLATE.format(self._title,
int(self._timer - round(time.time() - init_time))))
if not self._disabled:
# Execute operation
self._operation()
def disable(self):
"""
Disable task
"""
self._disabled = True
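# A hedged sketch (not used by the bot): scheduling a callback with
# DelayedOperation and cancelling it with disable(). The 5-second delay and the
# print-based message sink are illustrative only.
def _example_delayed_operation():
    op = DelayedOperation(5.0, lambda: print('operation fired'), 'Example', print)
    op.start()
    # op.disable()  # calling this before the delay elapses cancels the operation
    return op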
class GameState(StateMachine):
"""
    Game engine to handle game sessions
"""
# Game states
state_wait_for_players = State('WaitForPlayers', initial=True)
state_players_nicknames = State('PlayersNicknames')
state_configure_players = State('ConfigurePlayers')
state_day_discussion = State('DayDiscussion')
    state_day_vote = State('DayVote')
state_day_trial_launch = State('DayTrialLaunch')
state_day_trial_defense = State('DayTrialDefense')
state_day_trial_deliberation = State('DayTrialDeliberation')
state_day_trial_verdict = State('DayTrialVerdict')
state_day_trial_last_words = State('DayTrialLastWords')
state_day_trial_kill = State('DayTrialKill')
state_day_end = State('DayEnd')
state_night = State("Night")
state_night_sequence = State("NightSequence")
# Transitions between states
reset = state_wait_for_players.from_(state_wait_for_players,
state_players_nicknames,
state_configure_players,
state_day_discussion,
state_night)
select_names = state_wait_for_players.to(state_players_nicknames)
configure_players = state_players_nicknames.to(state_configure_players)
day_discussion = state_day_discussion.from_(state_configure_players, state_night_sequence)
day_vote = state_day_vote.from_(state_day_discussion, state_day_trial_verdict)
day_trial_launch = state_day_vote.to(state_day_trial_launch)
day_trial_defense = state_day_trial_launch.to(state_day_trial_defense)
day_trial_deliberation = state_day_trial_defense.to(state_day_trial_deliberation)
day_trial_verdict = state_day_trial_deliberation.to(state_day_trial_verdict)
day_trial_last_words = state_day_trial_verdict.to(state_day_trial_last_words)
day_trial_kill = state_day_trial_last_words.to(state_day_trial_kill)
day_end = state_day_end.from_(state_day_discussion, state_day_vote, state_day_trial_kill)
night = state_night.from_(state_day_end)
night_sequence = state_night.to(state_night_sequence)
def __init__(self, bot, mafia_engine):
super().__init__()
self._bot = bot
self._mafia_engine = mafia_engine
self._loop = asyncio.get_event_loop()
self._current_day = 0
self._next_state = None
def _send_message(self, message):
channel = self._bot.get_channel(775453457708482622)
asyncio.run_coroutine_threadsafe(channel.send(message), self._loop)
def disable_next_state(self):
"""
Used to disable configured next state
"""
if self._next_state is not None:
self._next_state.disable()
def on_reset(self):
"""
Called when state_reset state is set
"""
print("Reset !")
def on_select_names(self):
"""
Called when state_select_names state is set
"""
print("on_select_names")
self._mafia_engine.send_message_everyone(Misc.STATES_STRING_SEPARATOR)
self._mafia_engine.send_message_everyone("Lancement de la partie. "
"Configurez un pseudo personnalisé avec la commande '-VOTRE_PSEUDO'.")
self._next_state = DelayedOperation(Timers.TIMER_SELECT_NICKNAME,
self.configure_players,
"Choix des pseudos",
self._mafia_engine.send_message_everyone)
self._next_state.start()
def _on_configure_players_operations(self):
"""
Function to configure players with precise timing. Include sleeps, must be threaded !
"""
self._mafia_engine.send_message_everyone(Misc.STATES_STRING_SEPARATOR)
time.sleep(2.0)
self._mafia_engine.send_message_everyone("Répartition des rôles. Vous êtes...")
time.sleep(2.0)
self._mafia_engine.configure_players()
time.sleep(3.0)
# Go to first day !
next_state = DelayedOperation(3.0, self.day_discussion)
next_state.start()
def on_configure_players(self):
"""
Called when state_configure_players state is set
"""
print("on_configure_players")
operation = Thread(target=self._on_configure_players_operations)
operation.start()
def on_day_discussion(self):
"""
Called when state_day_discussion state is set
"""
print("on_day_discussion")
# Increase day counter
self._current_day += 1
self._mafia_engine.send_message_everyone(Misc.STATES_STRING_SEPARATOR)
self._mafia_engine.send_message_everyone("**JOUR {}** - Discussion - {} secondes"
.format(self._current_day, Timers.TIME_DAY_CHAT))
if self._current_day == 1:
next_state = self.day_end
else:
next_state = self.day_vote
self._next_state = DelayedOperation(Timers.TIME_DAY_CHAT,
next_state,
"Discussion",
self._mafia_engine.send_message_everyone)
self._next_state.start()
def on_day_vote(self):
"""
Called when state_day_vote state is set
"""
print("on_day_vote")
self._mafia_engine.send_message_everyone("*Vous pouvez désormais voter pour démarrer un procès (utilisez '-vote X' pour voter contre quelqu'un).*")
self._next_state = DelayedOperation(Timers.TIME_DAY_VOTE,
self.day_end,
"Vote",
self._mafia_engine.send_message_everyone)
self._next_state.start()
def _on_day_trial_launch_operations(self):
"""
Function to run the trial beginning
"""
self._mafia_engine.send_message_everyone("*La ville a décidé d'envoyer {} au procès.*"
.format(self._mafia_engine.player_trial.get_nickname()))
self._mafia_engine.send_message_everyone(Misc.STATES_STRING_SEPARATOR)
time.sleep(3.0)
self._mafia_engine.send_message_everyone("**Procès de **{}"
.format(self._mafia_engine.player_trial.get_nickname()))
# Launch trial defense
self.day_trial_defense()
def on_day_trial_launch(self):
"""
Called when state_day_trial_launch state is set
"""
print("on_day_trial_launch")
operation = Thread(target=self._on_day_trial_launch_operations)
operation.start()
def on_day_trial_defense(self):
"""
Called when state_day_trial_defense state is set
"""
time.sleep(1.0)
msg = "*{}, vous êtes jugé pour conspiration contre la ville. Quelle est votre défense ?* - {} secondes"\
.format(self._mafia_engine.player_trial.get_nickname(), Timers.TIME_DAY_TRIAL_DEFENSE)
self._mafia_engine.send_message_everyone(msg)
# Wait and go to trial deliberation
self._next_state = DelayedOperation(Timers.TIME_DAY_TRIAL_DEFENSE,
self.day_trial_deliberation)
self._next_state.start()
def on_day_trial_deliberation(self):
"""
Called when state_day_trial_deliberation state is set
"""
msg = "*La ville doit maintenant déterminer le sort de {}. '-innocent' pour innocent, '-guilty' pour coupable, '-cancel' pour annuler.* - {} secondes"\
.format(self._mafia_engine.player_trial.get_nickname(), Timers.TIME_DAY_TRIAL_DELIBERATION)
self._mafia_engine.send_message_everyone(msg)
self._next_state = DelayedOperation(Timers.TIME_DAY_TRIAL_DELIBERATION,
self.day_trial_verdict)
self._next_state.start()
def _on_day_trial_verdict_operations(self):
"""
Function to run the trial verdict
"""
self._mafia_engine.send_message_everyone("*Fin des délibérations*")
self._mafia_engine.send_message_everyone(Misc.STATES_STRING_SEPARATOR)
time.sleep(2.0)
self._mafia_engine.send_message_everyone("*Le procès est terminé. Les votes vont être comptés.*")
time.sleep(2.0)
# Compute the verdict
guilty = 0
innocent = 0
verdict_msg = ""
for player in self._mafia_engine.players:
player_vote = player.get_trial_vote()
if player_vote == Misc.TRIAL_GUILTY:
guilty += 1
verdict_msg += "*[{} a voté **Coupable**]*\n".format(player.get_nickname())
elif player_vote == Misc.TRIAL_INNOCENT:
innocent += 1
verdict_msg += "*[{} a voté **Innocent**]*\n".format(player.get_nickname())
else:
verdict_msg += "*[{} s'est abstenu]*\n".format(player.get_nickname())
if guilty > innocent:
# Execute the player
verdict_msg = "*La ville a décidé de lyncher {} par un vote de {} coupable(s) contre {} innocent(s).*\n"\
.format(self._mafia_engine.player_trial.get_nickname(), guilty, innocent) + verdict_msg
self._mafia_engine.send_message_everyone(verdict_msg)
# Kill the player !
self.day_trial_last_words()
else:
# Player saved by the town
verdict_msg = "*La ville a décidé de sauver {} par un vote de {} coupable(s) contre {} innocent(s).*\n"\
.format(self._mafia_engine.player_trial.get_nickname(), guilty, innocent) + verdict_msg
self._mafia_engine.send_message_everyone(verdict_msg)
self._mafia_engine.player_trial = None
time.sleep(2.0)
# Return to day_vote
self.day_vote()
def on_day_trial_verdict(self):
"""
Called when state_day_trial_verdict state is set
"""
print("on_day_trial_verdict")
operation = Thread(target=self._on_day_trial_verdict_operations)
operation.start()
def on_day_trial_last_words(self):
"""
Called when state_trial_last_words state is set
"""
print("on_day_trial_last_words")
self._mafia_engine.send_message_everyone(Misc.STATES_STRING_SEPARATOR)
self._mafia_engine.send_message_everyone("*Un dernier mot ?*")
self._next_state = DelayedOperation(Timers.TIME_DAY_TRIAL_LAST_WORDS, self.day_trial_kill)
self._next_state.start()
def _on_day_trial_kill_operation(self):
"""
Function to run the trial verdict
"""
self._mafia_engine.send_message_everyone(Misc.STATES_STRING_SEPARATOR)
self._mafia_engine.send_message_everyone("*Exécution de {}...*".format(self._mafia_engine.player_trial.get_nickname()))
time.sleep(2.0)
self._mafia_engine.player_trial.set_dead()
self._mafia_engine.send_message_everyone("*{} est mort.*".format(self._mafia_engine.player_trial.get_nickname()))
time.sleep(2.0)
msg = "*{} était **{}**.*".format(self._mafia_engine.player_trial.get_nickname(),
self._mafia_engine.player_trial.get_role().name)
self._mafia_engine.send_message_everyone(msg)
time.sleep(1.0)
self._mafia_engine.send_message_everyone("*## Derniers mots*\n{}".format(self._mafia_engine.player_trial.get_last_will()))
time.sleep(2.0)
self._mafia_engine.player_trial = None
self._mafia_engine.send_message_everyone(Misc.STATES_STRING_SEPARATOR)
self.day_end()
def on_day_trial_kill(self):
"""
Called when state_day_trial_kill state is set
"""
print("on_day_trial_kill")
operation = Thread(target=self._on_day_trial_kill_operation)
operation.start()
def _on_day_end_operations(self):
"""
Function to run the end of the day
"""
self._mafia_engine.send_message_everyone("*Fin de la journée, revoyons-nous demain.*")
self._next_state = DelayedOperation(Timers.TIME_DAY_END, self.night)
self._next_state.start()
def on_day_end(self):
"""
Called when state_day_end state is set
"""
print("on_day_end")
operation = Thread(target=self._on_day_end_operations)
operation.start()
def _on_night_operations(self):
"""
Function to run the night
"""
self._mafia_engine.send_message_everyone(Misc.STATES_STRING_SEPARATOR)
self._mafia_engine.send_message_everyone("**NUIT {}** - {} secondes"
.format(self._current_day, Timers.TIME_NIGHT))
for player in self._mafia_engine.players:
if player.get_role().alignment == Alignment.MAFIA:
# Display he can speak to the mafia
player.send_message_to_player("*Vous pouvez discuter avec les autres membres de la Mafia.*")
# Wait and go to night resolution !
self._next_state = DelayedOperation(Timers.TIME_NIGHT,
self.night_sequence,
"Nuit",
self._mafia_engine.send_message_everyone)
self._next_state.start()
def on_night(self):
"""
Called when state_night state is set
"""
print("on_night")
operation = Thread(target=self._on_night_operations)
operation.start()
def _on_night_sequence_operations(self):
"""
Function to run the night sequence operations
"""
self._mafia_engine.send_message_everyone("*Fin de la nuit...*")
self._mafia_engine.send_message_everyone(Misc.STATES_STRING_SEPARATOR)
self._mafia_engine.send_message_everyone("**Que s'est-il passé pendant la nuit ?**")
# TODO...
self._next_state = DelayedOperation(3.0, self.day_discussion)
self._next_state.start()
def on_night_sequence(self):
"""
Called when state_night_sequence state is set
"""
print("on_night_sequence")
operation = Thread(target=self._on_night_sequence_operations)
operation.start()
def get_current_day(self) -> int:
"""
Get the current day ID
:return: current day number
"""
return self._current_day
system_test.py
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""System test library, provides tools for tests that start multiple processes,
with special support for skupper-router processes.
Features:
- Create separate directories for each test.
- Save logs, sub-process output, core files etc.
- Automated clean-up after tests: kill sub-processes etc.
- Tools to manipulate router configuration files.
- Sundry other tools.
"""
import __main__
import errno
import fcntl
import json
import logging
import os
import pathlib
import queue as Queue
import random
import re
import shutil
import socket
import subprocess
import sys
import time
import unittest
import uuid
from copy import copy
from datetime import datetime
from subprocess import PIPE, STDOUT
from threading import Event
from threading import Thread
from typing import Callable, TextIO, List, Optional, Tuple
import proton
import proton.utils
from proton import Delivery
from proton import Message
from proton.handlers import MessagingHandler
from proton.reactor import AtLeastOnce, Container
from proton.reactor import AtMostOnce
from skupper_router.management.client import Node
from skupper_router.management.error import NotFoundStatus
# Optional modules
MISSING_MODULES = []
try:
import qpidtoollibs
except ImportError as err:
qpidtoollibs = None # pylint: disable=invalid-name
MISSING_MODULES.append(str(err))
try:
import qpid_messaging as qm
except ImportError as err:
qm = None # pylint: disable=invalid-name
MISSING_MODULES.append(str(err))
def find_exe(program):
"""Find an executable in the system PATH"""
def is_exe(fpath):
"""True if fpath is executable"""
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
mydir = os.path.split(program)[0]
if mydir:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
# The directory where this module lives. Used to locate static configuration files etc.
DIR = os.path.dirname(__file__)
def _check_requirements():
"""If requirements are missing, return a message, else return empty string."""
missing = MISSING_MODULES
required_exes = ['skrouterd']
missing += ["No exectuable %s" % e for e in required_exes if not find_exe(e)]
if missing:
return "%s: %s" % (__name__, ", ".join(missing))
MISSING_REQUIREMENTS = _check_requirements()
def retry_delay(deadline, delay, max_delay):
"""For internal use in retry. Sleep as required
and return the new delay or None if retry should time out"""
remaining = deadline - time.time()
if remaining <= 0:
return None
time.sleep(min(delay, remaining))
return min(delay * 2, max_delay)
# Valgrind significantly slows down the response time of the router, so use a
# long default timeout
TIMEOUT = float(os.environ.get("QPID_SYSTEM_TEST_TIMEOUT", 60))
def retry(function: Callable[[], bool], timeout: float = TIMEOUT, delay: float = .001, max_delay: float = 1):
"""Call function until it returns a true value or timeout expires.
Double the delay for each retry up to max_delay.
Returns what function returns or None if timeout expires.
"""
deadline = time.time() + timeout
while True:
ret = function()
if ret:
return ret
else:
delay = retry_delay(deadline, delay, max_delay)
if delay is None:
return None
def retry_exception(function, timeout=TIMEOUT, delay=.001, max_delay=1, exception_test=None):
"""Call function until it returns without exception or timeout expires.
Double the delay for each retry up to max_delay.
Calls exception_test with any exception raised by function, exception_test
may itself raise an exception to terminate the retry.
Returns what function returns if it succeeds before timeout.
Raises last exception raised by function on timeout.
"""
deadline = time.time() + timeout
while True:
try:
return function()
except Exception as e: # pylint: disable=broad-except
if exception_test:
exception_test(e)
delay = retry_delay(deadline, delay, max_delay)
if delay is None:
raise
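# Illustrative sketch of the retry helpers above: poll for a condition with the
# default exponential backoff (1 ms doubling up to 1 s). The path is a placeholder.
def _example_retry_usage(path="/tmp/example.ready"):
    """Return True once `path` exists, or None if the 5 second timeout expires."""
    return retry(lambda: os.path.exists(path), timeout=5.0)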
def get_local_host_socket(socket_address_family='IPv4'):
if socket_address_family == 'IPv4':
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = '127.0.0.1'
elif socket_address_family == 'IPv6':
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
host = '::1'
return s, host
def check_port_refuses_connection(port, socket_address_family='IPv4'):
"""Return true if connecting to host:port gives 'connection refused'."""
s, host = get_local_host_socket(socket_address_family)
try:
s.connect((host, port))
except OSError as e:
return e.errno == errno.ECONNREFUSED
finally:
s.close()
return False
def check_port_permits_binding(port, socket_address_family='IPv4'):
"""Return true if binding to the port succeeds."""
s, _ = get_local_host_socket(socket_address_family)
host = ""
try:
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # so that followup binders are not blocked
s.bind((host, port))
except OSError:
return False
finally:
s.close()
return True
def is_port_available(port, socket_address_family='IPv4'):
"""Return true if a new server will be able to bind to the port."""
return (check_port_refuses_connection(port, socket_address_family)
and check_port_permits_binding(port, socket_address_family))
def wait_port(port, socket_address_family='IPv4', **retry_kwargs):
"""Wait up to timeout for port (on host) to be connectable.
Takes same keyword arguments as retry to control the timeout"""
def check(e):
"""Only retry on connection refused"""
if not isinstance(e, socket.error) or not e.errno == errno.ECONNREFUSED:
raise
host = None
def connect():
# macOS gives EINVAL for all connection attempts after a ECONNREFUSED
# man 3 connect: "If connect() fails, the state of the socket is unspecified. [...]"
s, host = get_local_host_socket(socket_address_family)
try:
s.connect((host, port))
finally:
s.close()
try:
retry_exception(connect, exception_test=check, **retry_kwargs)
except Exception as e:
raise Exception("wait_port timeout on host %s port %s: %s" % (host, port, e))
def wait_ports(ports, **retry_kwargs):
"""Wait up to timeout for all ports (on host) to be connectable.
Takes same keyword arguments as retry to control the timeout"""
for port, socket_address_family in ports.items():
wait_port(port=port, socket_address_family=socket_address_family, **retry_kwargs)
def message(**properties):
"""Convenience to create a proton.Message with properties set"""
m = Message()
for name, value in properties.items():
getattr(m, name) # Raise exception if not a valid message attribute.
setattr(m, name, value)
return m
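# Illustrative sketch: message() looks each keyword up on the Message object before
# assigning it, so a misspelled property fails fast. The values are placeholders.
def _example_message():
    """Build a durable test message addressed to a hypothetical 'examples' node."""
    return message(address="examples", body="payload", durable=True)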
def skip_test_in_ci(environment_var):
env_var = os.environ.get(environment_var)
if env_var is not None:
if env_var.lower() in ['true', '1', 't', 'y', 'yes']:
return True
return False
class Process(subprocess.Popen):
"""
Popen that can be torn down at the end of a TestCase and stores its output.
"""
# Expected states of a Process at teardown
RUNNING = -1 # Still running
EXIT_OK = 0 # Exit status 0
EXIT_FAIL = 1 # Exit status 1
unique_id = 0
@classmethod
def unique(cls, name):
cls.unique_id += 1
return "%s-%s" % (name, cls.unique_id)
def __init__(self, args, name=None, expect=EXIT_OK, **kwargs):
"""
Takes same arguments as subprocess.Popen. Some additional/special args:
@param expect: Raise error if process status not as expected at end of test:
L{RUNNING} - expect still running.
L{EXIT_OK} - expect process to have terminated with 0 exit status.
L{EXIT_FAIL} - expect process to have terminated with exit status 1.
integer - expected return code
@keyword stdout: Defaults to the file name+".out"
@keyword stderr: Defaults to be the same as stdout
"""
self.name = name or os.path.basename(args[0])
self.args = args
self.expect = expect
self.outdir = os.getcwd()
self.outfile = os.path.abspath(self.unique(self.name))
self.torndown = False
with open(self.outfile + '.out', 'w') as out:
kwargs.setdefault('stdout', out)
kwargs.setdefault('stderr', subprocess.STDOUT)
try:
super(Process, self).__init__(args, **kwargs)
with open(self.outfile + '.cmd', 'w') as f:
f.write("%s\npid=%s\n" % (' '.join(args), self.pid))
except Exception as e:
raise Exception("subprocess.Popen(%s, %s) failed: %s: %s" %
(args, kwargs, type(e).__name__, e))
def assert_running(self):
"""Assert that the process is still running"""
assert self.poll() is None, "%s: exited" % ' '.join(self.args)
def teardown(self):
"""Check process status and stop the process if necessary"""
if self.torndown:
return
self.torndown = True
def error(msg):
with open(self.outfile + '.out') as f:
raise RuntimeError("Process %s error: %s\n%s\n%s\n>>>>\n%s<<<<" % (
self.pid, msg, ' '.join(self.args),
self.outfile + '.cmd', f.read()))
status = self.poll()
if status is None: # Still running
self.terminate()
if self.expect is not None and self.expect != Process.RUNNING:
error("still running")
self.expect = 0 # Expect clean exit after terminate
status = self.wait()
if self.expect is not None and self.expect != status:
error("exit code %s, expected %s" % (status, self.expect))
class Config:
"""Base class for configuration objects that provide a convenient
way to create content for configuration files."""
def write(self, name, suffix=".conf"):
"""Write the config object to file name.suffix. Returns name.suffix."""
name = name + suffix
with open(name, 'w') as f:
f.write(str(self))
return name
class HttpServer(Process):
def __init__(self, args, name=None, expect=Process.RUNNING):
super(HttpServer, self).__init__(args, name=name, expect=expect)
class Http2Server(HttpServer):
"""A HTTP2 Server that will respond to requests made via the router."""
def __init__(self, name=None, listen_port=None, wait=True,
perform_teardown=True, cl_args=None,
server_file=None,
expect=Process.RUNNING):
self.name = name
self.listen_port = listen_port
self.ports_family = {self.listen_port: 'IPv4'}
self.cl_args = cl_args
self.perform_teardown = perform_teardown
self.server_file = server_file
self._wait_ready = False
self.args = [sys.executable, os.path.join(os.path.dirname(os.path.abspath(__file__)), self.server_file)]
if self.cl_args:
self.args += self.cl_args
super(Http2Server, self).__init__(self.args, name=name, expect=expect)
if wait:
self.wait_ready()
def wait_ready(self, **retry_kwargs):
"""
Wait for ports to be ready
"""
if not self._wait_ready:
self._wait_ready = True
self.wait_ports(**retry_kwargs)
def wait_ports(self, **retry_kwargs):
wait_ports(self.ports_family, **retry_kwargs)
class Qdrouterd(Process):
"""Run a Qpid Dispatch Router Daemon"""
class Config(list, Config): # type: ignore[misc] # Cannot resolve name "Config" (possible cyclic definition) # mypy#10958
"""
A router configuration.
The Config class is a list of tuples in the following format:
[ ('section-name', {attribute-map}), ...]
where attribute-map is a dictionary of key+value pairs. Key is an
attribute name (string), value can be any of [scalar | string | dict]
When written to a configuration file to be loaded by the router:
        o) there is no ':' between the section-name and the opening brace
o) attribute keys are separated by a ":" from their values
o) attribute values that are scalar or string follow the ":" on the
same line.
o) attribute values do not have trailing commas
o) The section-name and attribute keywords are written
without enclosing quotes
o) string type attribute values are not enclosed in quotes
o) attribute values of type dict are written in their JSON representation.
Fills in some default values automatically, see Qdrouterd.DEFAULTS
"""
DEFAULTS = {
'listener': {'host': '0.0.0.0', 'saslMechanisms': 'ANONYMOUS', 'idleTimeoutSeconds': '120',
'authenticatePeer': 'no', 'role': 'normal'},
'connector': {'host': '127.0.0.1', 'saslMechanisms': 'ANONYMOUS', 'idleTimeoutSeconds': '120'},
'router': {'mode': 'standalone', 'id': 'QDR'}
}
def sections(self, name):
"""Return list of sections named name"""
return [p for n, p in self if n == name]
@property
def router_id(self): return self.sections("router")[0]["id"]
def defaults(self):
"""Fill in default values in gconfiguration"""
for name, props in self:
if name in Qdrouterd.Config.DEFAULTS:
for n, p in Qdrouterd.Config.DEFAULTS[name].items():
props.setdefault(n, p)
def __str__(self):
"""Generate config file content. Calls default() first."""
def tabs(level):
if level:
return " " * level
return ""
def value(item, level):
if isinstance(item, dict):
result = "{\n"
result += "".join(["%s%s: %s,\n" % (tabs(level + 1),
json.dumps(k),
json.dumps(v))
for k, v in item.items()])
result += "%s}" % tabs(level)
return result
return "%s" % item
def attributes(e, level):
assert(isinstance(e, dict))
# k = attribute name
# v = string | scalar | dict
return "".join(["%s%s: %s\n" % (tabs(level),
k,
value(v, level + 1))
for k, v in e.items()])
self.defaults()
# top level list of tuples ('section-name', dict)
return "".join(["%s {\n%s}\n" % (n, attributes(p, 1)) for n, p in self])
def __init__(self, name=None, config=Config(), pyinclude=None, wait=True,
perform_teardown=True, cl_args=None, expect=Process.RUNNING):
"""
        @param name: name used for output files; defaults to the id from config.
@param config: router configuration
@keyword wait: wait for router to be ready (call self.wait_ready())
"""
cl_args = cl_args or []
self.config = copy(config)
self.perform_teardown = perform_teardown
if not name:
name = self.config.router_id
assert name
# setup log and debug dump files
self.dumpfile = os.path.abspath('%s-qddebug.txt' % name)
self.config.sections('router')[0]['debugDumpFile'] = self.dumpfile
default_log = [l for l in config if (l[0] == 'log' and l[1]['module'] == 'DEFAULT')]
if not default_log:
self.logfile = "%s.log" % name
config.append(
('log', {'module': 'DEFAULT', 'enable': 'trace+',
'includeSource': 'true', 'outputFile': self.logfile}))
else:
self.logfile = default_log[0][1].get('outputFile')
args = ['skrouterd', '-c', config.write(name)] + cl_args
env_home = os.environ.get('QPID_DISPATCH_HOME')
if pyinclude:
args += ['-I', pyinclude]
elif env_home:
args += ['-I', os.path.join(env_home, 'python')]
args = os.environ.get('QPID_DISPATCH_RUNNER', '').split() + args
super(Qdrouterd, self).__init__(args, name=name, expect=expect)
self._management = None
self._wait_ready = False
if wait:
self.wait_ready()
@property
def management(self):
"""Return a management agent proxy for this router"""
if not self._management:
self._management = Node.connect(self.addresses[0], timeout=TIMEOUT)
return self._management
def teardown(self):
if self._management:
try:
self._management.close()
except:
pass
self._management = None
if not self.perform_teardown:
return
teardown_exc = None
try:
super(Qdrouterd, self).teardown()
except Exception as exc:
# re-raise _after_ dumping all the state we can
teardown_exc = exc
def check_output_file(filename, description):
"""check router's debug dump file for anything interesting (should be
empty) and dump it to stderr for perusal by organic lifeforms"""
try:
if os.stat(filename).st_size > 0:
with open(filename) as f:
sys.stderr.write("\nRouter %s %s:\n>>>>\n" %
(self.config.router_id, description))
sys.stderr.write(f.read())
sys.stderr.write("\n<<<<\n")
sys.stderr.flush()
except OSError:
# failed to open file. This can happen when an individual test
# spawns a temporary router (i.e. not created as part of the
# TestCase setUpClass method) that gets cleaned up by the test.
pass
check_output_file(filename=self.outfile + '.out', description="output file")
check_output_file(filename=self.dumpfile, description="debug dump file")
if teardown_exc:
# teardown failed - possible router crash?
# dump extra stuff (command line, output, log)
def tail_file(fname, line_count=50):
"""Tail a file to a list"""
out = []
with open(fname) as f:
line = f.readline()
while line:
out.append(line)
if len(out) > line_count:
out.pop(0)
line = f.readline()
return out
try:
for fname in [("output", self.outfile + '.out'),
("command", self.outfile + '.cmd')]:
with open(fname[1]) as f:
sys.stderr.write("\nRouter %s %s file:\n>>>>\n" %
(self.config.router_id, fname[0]))
sys.stderr.write(f.read())
sys.stderr.write("\n<<<<\n")
if self.logfile:
sys.stderr.write("\nRouter %s log file tail:\n>>>>\n" %
self.config.router_id)
tail = tail_file(os.path.join(self.outdir, self.logfile))
for ln in tail:
sys.stderr.write("%s" % ln)
sys.stderr.write("\n<<<<\n")
sys.stderr.flush()
except OSError:
# ignore file not found in case test never opens these
pass
raise teardown_exc
@property
def ports_family(self):
"""
Return a dict of listener ports and the respective port family
Example -
{ 23456: 'IPv4', 243455: 'IPv6' }
"""
ports_fam = {}
for l in self.config.sections('listener'):
if l.get('socketAddressFamily'):
ports_fam[l['port']] = l['socketAddressFamily']
else:
ports_fam[l['port']] = 'IPv4'
return ports_fam
@property
def ports(self):
"""Return list of configured ports for all listeners"""
return [l['port'] for l in self.config.sections('listener')]
def _cfg_2_host_port(self, c):
host = c['host']
port = c['port']
socket_address_family = c.get('socketAddressFamily', 'IPv4')
if socket_address_family == 'IPv6':
return "[%s]:%s" % (host, port)
elif socket_address_family == 'IPv4':
return "%s:%s" % (host, port)
raise Exception("Unknown socket address family: %s" % socket_address_family)
@property
def http_addresses(self):
"""Return http://host:port addresses for all http listeners"""
cfg = self.config.sections('httpListener')
return ["http://%s" % self._cfg_2_host_port(l) for l in cfg]
@property
def addresses(self):
"""Return amqp://host:port addresses for all listeners"""
cfg = self.config.sections('listener')
return ["amqp://%s" % self._cfg_2_host_port(l) for l in cfg]
@property
def connector_addresses(self):
"""Return list of amqp://host:port for all connectors"""
cfg = self.config.sections('connector')
return ["amqp://%s" % self._cfg_2_host_port(c) for c in cfg]
@property
def hostports(self):
"""Return host:port for all listeners"""
return [self._cfg_2_host_port(l) for l in self.config.sections('listener')]
def is_connected(self, port, host='127.0.0.1'):
"""If router has a connection to host:port:identity return the management info.
Otherwise return None"""
try:
ret_val = False
response = self.management.query(type="io.skupper.router.connection")
index_host = response.attribute_names.index('host')
for result in response.results:
outs = '%s:%s' % (host, port)
if result[index_host] == outs:
ret_val = True
return ret_val
except:
return False
def wait_address(self, address, subscribers=0, remotes=0, count=1, **retry_kwargs):
"""
Wait for an address to be visible on the router.
@keyword subscribers: Wait till subscriberCount >= subscribers
@keyword remotes: Wait till remoteCount >= remotes
@keyword count: Wait until >= count matching addresses are found
@param retry_kwargs: keyword args for L{retry}
"""
def check():
# TODO aconway 2014-06-12: this should be a request by name, not a query.
# Need to rationalize addresses in management attributes.
# endswith check is because of M/L/R prefixes
addrs = self.management.query(
type='io.skupper.router.router.address',
attribute_names=['name', 'subscriberCount', 'remoteCount']).get_entities()
addrs = [a for a in addrs if a['name'].endswith(address)]
return (len(addrs) >= count
and addrs[0]['subscriberCount'] >= subscribers
and addrs[0]['remoteCount'] >= remotes)
assert retry(check, **retry_kwargs)
def wait_address_unsubscribed(self, address, **retry_kwargs):
"""
Block until address has no subscribers
"""
a_type = 'io.skupper.router.router.address'
def check():
addrs = self.management.query(a_type).get_dicts()
rc = [a for a in addrs if a['name'].endswith(address)]
count = 0
for a in rc:
count += a['subscriberCount']
count += a['remoteCount']
return count == 0
assert retry(check, **retry_kwargs)
def get_host(self, socket_address_family):
if socket_address_family == 'IPv4':
return '127.0.0.1'
elif socket_address_family == 'IPv6':
return '::1'
else:
return '127.0.0.1'
def wait_ports(self, **retry_kwargs):
wait_ports(self.ports_family, **retry_kwargs)
def wait_connectors(self, **retry_kwargs):
"""
Wait for all connectors to be connected
@param retry_kwargs: keyword args for L{retry}
"""
for c in self.config.sections('connector'):
assert retry(lambda: self.is_connected(port=c['port'], host=self.get_host(c.get('socketAddressFamily'))),
**retry_kwargs), "Port not connected %s" % c['port']
def wait_startup_message(self, **retry_kwargs):
"""Wait for router startup message to be printed into logfile
This ensures that the router installs its signal handlers, avoiding
a router failure with return code -15 upon premature SIGTERM (DISPATCH-1689)
e.g. 2022-03-03 19:08:13.608655 +0100 SERVER (notice) Operational, 4 Threads Running (process ID 2190110)
"""
def _is_startup_line_present(f: TextIO) -> bool:
for line in f:
m = re.search(r'SERVER \(notice\) Operational, (\d+) Threads Running \(process ID (\d+)\)', line)
if m:
return True
return False
logfile_path = self.logfile_path
# system_tests_log_level_update filters SERVER module logs to a separate file
server_log = [l for l in self.config if (l[0] == 'log' and l[1]['module'] == 'SERVER')]
if server_log:
logfile_path = os.path.join(self.outdir, server_log[0][1].get('outputFile'))
assert retry(lambda: pathlib.Path(logfile_path).is_file(), **retry_kwargs), \
f"Router logfile {logfile_path} does not exist or is not a file"
with open(logfile_path, 'rt') as router_log:
assert retry(lambda: _is_startup_line_present(router_log), **retry_kwargs),\
"Router startup line not present in router log"
def wait_ready(self, **retry_kwargs):
"""Wait for ports and connectors to be ready"""
if not self._wait_ready:
self._wait_ready = True
self.wait_ports(**retry_kwargs)
self.wait_connectors(**retry_kwargs)
self.wait_startup_message(**retry_kwargs)
return self
def is_router_connected(self, router_id, **retry_kwargs):
node = None
try:
self.management.read(identity="router.node/%s" % router_id)
# TODO aconway 2015-01-29: The above check should be enough, we
# should not advertise a remote router in management till it is fully
# connected. However we still get a race where the router is not
# actually ready for traffic. Investigate.
# Meantime the following actually tests send-thru to the router.
node = Node.connect(self.addresses[0], router_id, timeout=1)
return retry_exception(lambda: node.query('io.skupper.router.router'))
except (proton.ConnectionException, NotFoundStatus, proton.utils.LinkDetached):
# proton.ConnectionException: the router is not yet accepting connections
# NotFoundStatus: the queried router is not yet connected
# TODO(DISPATCH-2119) proton.utils.LinkDetached: should be removed, currently needed for DISPATCH-2033
return False
finally:
if node:
node.close()
def wait_router_connected(self, router_id, **retry_kwargs):
retry(lambda: self.is_router_connected(router_id), **retry_kwargs)
@property
def logfile_path(self):
"""Path to a DEFAULT logfile"""
return os.path.join(self.outdir, self.logfile)
class Tester:
"""Tools for use by TestCase
- Create a directory for the test.
- Utilities to create processes and servers, manage ports etc.
- Clean up processes on teardown"""
# Top level directory above any Tester directories.
# CMake-generated configuration may be found here.
top_dir = os.getcwd()
# The root directory for Tester directories, under top_dir
root_dir = os.path.abspath(__name__ + '.dir')
# Minimum and maximum port number for free port searches
port_range = (20000, 30000)
def __init__(self, id):
"""
@param id: module.class.method or False if no directory should be created
"""
self.directory = os.path.join(self.root_dir, *id.split('.')) if id else None
self.cleanup_list = []
self.port_file = pathlib.Path(self.top_dir, "next_port.lock").open("a+t")
self.cleanup(self.port_file)
def rmtree(self):
"""Remove old test class results directory"""
if self.directory:
shutil.rmtree(os.path.dirname(self.directory), ignore_errors=True)
def setup(self):
"""Called from test setup and class setup."""
if self.directory:
os.makedirs(self.directory)
os.chdir(self.directory)
def _next_port(self) -> int:
"""Reads and increments value stored in self.port_file, under an exclusive file lock.
        When the lock cannot be acquired immediately, fcntl.flock blocks.
Failure possibilities:
        File locks may not work correctly on network filesystems, but we should be no worse off than before.
This method always unlocks the lock file, so it should not ever deadlock other tests running in parallel.
Even if that happened, the lock is unlocked by the OS when the file is closed, which happens automatically
when the process that opened and locked it ends.
Invalid content in the self.port_file will break this method. Manual intervention is then required.
"""
try:
fcntl.flock(self.port_file, fcntl.LOCK_EX)
# read old value
self.port_file.seek(0, os.SEEK_END)
if self.port_file.tell() != 0:
self.port_file.seek(0)
port = int(self.port_file.read())
else:
# file is empty
port = random.randint(self.port_range[0], self.port_range[1])
next_port = port + 1
if next_port >= self.port_range[1]:
next_port = self.port_range[0]
# write new value
self.port_file.seek(0)
self.port_file.truncate(0)
self.port_file.write(str(next_port))
self.port_file.flush()
return port
finally:
fcntl.flock(self.port_file, fcntl.LOCK_UN)
def teardown(self):
"""Clean up (tear-down, stop or close) objects recorded via cleanup()"""
self.cleanup_list.reverse()
errors = []
for obj in self.cleanup_list:
try:
for method in ["teardown", "tearDown", "stop", "close"]:
cleanup = getattr(obj, method, None)
if cleanup:
cleanup()
break
except Exception as exc:
errors.append(exc)
if errors:
raise RuntimeError("Errors during teardown: \n\n%s" % "\n\n".join([str(e) for e in errors]))
def cleanup(self, x):
"""Record object x for clean-up during tear-down.
        x should have one of the methods teardown, tearDown, stop or close"""
self.cleanup_list.append(x)
return x
def popen(self, *args, **kwargs) -> Process:
"""Start a Process that will be cleaned up on teardown"""
return self.cleanup(Process(*args, **kwargs))
def qdrouterd(self, *args, **kwargs) -> Qdrouterd:
"""Return a Qdrouterd that will be cleaned up on teardown"""
return self.cleanup(Qdrouterd(*args, **kwargs))
def http2server(self, *args, **kwargs):
return self.cleanup(Http2Server(*args, **kwargs))
def get_port(self, socket_address_family: str = 'IPv4') -> int:
"""Get an unused port"""
p = self._next_port()
start = p
while not is_port_available(p, socket_address_family):
p = self._next_port()
if p == start:
raise Exception("No available ports in range %s", self.port_range)
return p
class TestCase(unittest.TestCase, Tester): # pylint: disable=too-many-public-methods
"""A TestCase that sets up its own working directory and is also a Tester."""
tester: Tester
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName)
Tester.__init__(self, self.id())
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.maxDiff = None
cls.tester = Tester('.'.join([cls.__module__, cls.__name__, 'setUpClass']))
cls.tester.rmtree()
cls.tester.setup()
@classmethod
def tearDownClass(cls):
if hasattr(cls, 'tester'):
cls.tester.teardown()
del cls.tester
super().tearDownClass()
def setUp(self):
super().setUp()
Tester.setup(self)
def tearDown(self):
Tester.teardown(self)
super().tearDown()
def assert_fair(self, seq):
avg = sum(seq) / len(seq)
for i in seq:
assert i > avg / 2, "Work not fairly distributed: %s" % seq
if not hasattr(unittest.TestCase, 'assertRegex'):
def assertRegex(self, text, regexp, msg=None):
assert re.search(regexp, text), msg or "Can't find %r in '%s'" % (regexp, text)
if not hasattr(unittest.TestCase, 'assertNotRegex'):
def assertNotRegex(self, text, regexp, msg=None):
assert not re.search(regexp, text), msg or "Found %r in '%s'" % (regexp, text)
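# Illustrative sketch of how the pieces above are typically combined in a system test:
# setUpClass starts a router through cls.tester, test methods talk to it, and teardown
# stops everything automatically. All names below are placeholders.
#
#     class ExampleRouterTest(TestCase):
#         @classmethod
#         def setUpClass(cls):
#             super(ExampleRouterTest, cls).setUpClass()
#             config = Qdrouterd.Config([
#                 ('router', {'mode': 'standalone', 'id': 'EX'}),
#                 ('listener', {'port': cls.tester.get_port()}),
#             ])
#             cls.router = cls.tester.qdrouterd('example-router', config, wait=True)
#
#         def test_router_accepts_connections(self):
#             self.assertTrue(self.router.addresses)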
def main_module():
"""
Return the module name of the __main__ module - i.e. the filename with the
path and .py extension stripped. Useful to run the tests in the current file but
using the proper module prefix instead of '__main__', as follows:
if __name__ == '__main__':
unittest.main(module=main_module())
"""
return os.path.splitext(os.path.basename(__main__.__file__))[0]
class AsyncTestReceiver(MessagingHandler):
"""
A simple receiver that runs in the background and queues any received
messages. Messages can be retrieved from this thread via the queue member.
:param wait: block the constructor until the link has been fully
established.
:param recover_link: restart on remote link detach
"""
Empty = Queue.Empty
class MyQueue(Queue.Queue):
def __init__(self, receiver):
self._async_receiver = receiver
super(AsyncTestReceiver.MyQueue, self).__init__()
def get(self, timeout=TIMEOUT):
self._async_receiver.num_queue_gets += 1
msg = super(AsyncTestReceiver.MyQueue, self).get(timeout=timeout)
self._async_receiver._logger.log("message %d get"
% self._async_receiver.num_queue_gets)
return msg
def put(self, msg):
self._async_receiver.num_queue_puts += 1
super(AsyncTestReceiver.MyQueue, self).put(msg)
self._async_receiver._logger.log("message %d put"
% self._async_receiver.num_queue_puts)
def __init__(self, address, source, conn_args=None, container_id=None,
wait=True, recover_link=False, msg_args=None, print_to_console=False):
if msg_args is None:
msg_args = {}
super(AsyncTestReceiver, self).__init__(**msg_args)
self.address = address
self.source = source
self.conn_args = conn_args
self.queue = AsyncTestReceiver.MyQueue(self)
self._conn = None
self._container = Container(self)
cid = container_id or "ATR-%s:%s" % (source, uuid.uuid4())
self._container.container_id = cid
self._ready = Event()
self._recover_link = recover_link
self._recover_count = 0
self._stop_thread = False
self._thread = Thread(target=self._main)
self._logger = Logger(title="AsyncTestReceiver %s" % cid, print_to_console=print_to_console)
self._thread.daemon = True
self._thread.start()
self.num_queue_puts = 0
self.num_queue_gets = 0
if wait and self._ready.wait(timeout=TIMEOUT) is False:
raise Exception("Timed out waiting for receiver start")
self.queue_stats = "self.num_queue_puts=%d, self.num_queue_gets=%d"
def get_queue_stats(self):
return self.queue_stats % (self.num_queue_puts, self.num_queue_gets)
def _main(self):
self._container.timeout = 0.5
self._container.start()
self._logger.log("AsyncTestReceiver Starting reactor")
while self._container.process():
if self._stop_thread:
if self._conn:
self._conn.close()
self._conn = None
self._logger.log("AsyncTestReceiver reactor thread done")
def on_connection_error(self, event):
self._logger.log("AsyncTestReceiver on_connection_error=%s" % event.connection.remote_condition.description)
def on_link_error(self, event):
self._logger.log("AsyncTestReceiver on_link_error=%s" % event.link.remote_condition.description)
def stop(self, timeout=TIMEOUT):
self._stop_thread = True
self._container.wakeup()
self._thread.join(timeout=TIMEOUT)
self._logger.log("thread done")
if self._thread.is_alive():
raise Exception("AsyncTestReceiver did not exit")
del self._conn
del self._container
def on_start(self, event):
kwargs = {'url': self.address}
if self.conn_args:
kwargs.update(self.conn_args)
self._conn = event.container.connect(**kwargs)
def on_connection_opened(self, event):
self._logger.log("Connection opened")
kwargs = {'source': self.source}
event.container.create_receiver(event.connection, **kwargs)
def on_link_opened(self, event):
self._logger.log("link opened")
self._ready.set()
def on_link_closing(self, event):
self._logger.log("link closing")
event.link.close()
if self._recover_link and not self._stop_thread:
# lesson learned: the generated link name will be the same as the
# old link (which is bad) so we specify a new one
self._recover_count += 1
kwargs = {'source': self.source,
'name': "%s:%s" % (event.link.name, self._recover_count)}
rcv = event.container.create_receiver(event.connection,
**kwargs)
def on_message(self, event):
self.queue.put(event.message)
def on_disconnected(self, event):
# if remote terminates the connection kill the thread else it will spin
# on the cpu
self._logger.log("Disconnected")
if self._conn:
self._conn.close()
self._conn = None
def dump_log(self):
self._logger.dump()
class AsyncTestSender(MessagingHandler):
"""
A simple sender that runs in the background and sends 'count' messages to a
given target.
"""
class TestSenderException(Exception):
def __init__(self, error=None):
super(AsyncTestSender.TestSenderException, self).__init__(error)
def __init__(self, address, target, count=1, message=None,
container_id=None, presettle=False, print_to_console=False):
super(AsyncTestSender, self).__init__(auto_accept=False,
auto_settle=False)
self.address = address
self.target = target
self.total = count
self.presettle = presettle
self.accepted = 0
self.released = 0
self.modified = 0
self.rejected = 0
self.sent = 0
self.error = None
self.link_stats = None
self._conn = None
self._sender = None
self._message = message or Message(body="test")
self._container = Container(self)
cid = container_id or "ATS-%s:%s" % (target, uuid.uuid4())
self._container.container_id = cid
self._link_name = "%s-%s" % (cid, "tx")
self._thread = Thread(target=self._main)
self._thread.daemon = True
self._logger = Logger(title="AsyncTestSender %s" % cid, print_to_console=print_to_console)
self._thread.start()
self.msg_stats = "self.sent=%d, self.accepted=%d, self.released=%d, self.modified=%d, self.rejected=%d"
def _main(self):
self._container.timeout = 0.5
self._container.start()
self._logger.log("AsyncTestSender Starting reactor")
while self._container.process():
self._check_if_done()
self._logger.log("AsyncTestSender reactor thread done")
def get_msg_stats(self):
return self.msg_stats % (self.sent, self.accepted, self.released, self.modified, self.rejected)
def wait(self):
# don't stop it - wait until everything is sent
self._logger.log("AsyncTestSender wait: about to join thread")
self._thread.join(timeout=TIMEOUT)
self._logger.log("AsyncTestSender wait: thread done")
assert not self._thread.is_alive(), "sender did not complete"
if self.error:
raise AsyncTestSender.TestSenderException(self.error)
del self._sender
del self._conn
del self._container
self._logger.log("AsyncTestSender wait: no errors in wait")
def on_start(self, event):
self._conn = self._container.connect(self.address)
def on_connection_opened(self, event):
self._logger.log("Connection opened")
option = AtMostOnce if self.presettle else AtLeastOnce
self._sender = self._container.create_sender(self._conn,
target=self.target,
options=option(),
name=self._link_name)
def on_sendable(self, event):
if self.sent < self.total:
self._sender.send(self._message)
self.sent += 1
self._logger.log("message %d sent" % self.sent)
def _check_if_done(self):
done = (self.sent == self.total
and (self.presettle
or (self.accepted + self.released + self.modified
+ self.rejected == self.sent)))
if done and self._conn:
self.link_stats = get_link_info(self._link_name,
self.address)
self._conn.close()
self._conn = None
self._logger.log("Connection closed")
def on_accepted(self, event):
self.accepted += 1
event.delivery.settle()
self._logger.log("message %d accepted" % self.accepted)
def on_released(self, event):
# for some reason Proton 'helpfully' calls on_released even though the
# delivery state is actually MODIFIED
if event.delivery.remote_state == Delivery.MODIFIED:
return self.on_modified(event)
self.released += 1
event.delivery.settle()
self._logger.log("message %d released" % self.released)
def on_modified(self, event):
self.modified += 1
event.delivery.settle()
self._logger.log("message %d modified" % self.modified)
def on_rejected(self, event):
self.rejected += 1
event.delivery.settle()
self._logger.log("message %d rejected" % self.rejected)
def on_link_error(self, event):
self.error = "link error:%s" % str(event.link.remote_condition)
self._logger.log(self.error)
if self._conn:
self._conn.close()
self._conn = None
def on_disconnected(self, event):
# if remote terminates the connection kill the thread else it will spin
# on the cpu
self.error = "connection to remote dropped"
self._logger.log(self.error)
if self._conn:
self._conn.close()
self._conn = None
def dump_log(self):
self._logger.dump()
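# Illustrative sketch: pairing the background receiver and sender defined above.
# The router address and the target are placeholders.
def _example_async_send_receive(address="amqp://127.0.0.1:20000", target="examples"):
    """Send 10 messages to `target` via the router at `address` and drain them again."""
    receiver = AsyncTestReceiver(address, source=target)  # blocks until the link is up
    sender = AsyncTestSender(address, target, count=10)
    sender.wait()                                         # all deliveries settled
    received = [receiver.queue.get(timeout=TIMEOUT) for _ in range(10)]
    receiver.stop()
    return received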
class QdManager:
"""
A means to invoke skmanage during a testcase
"""
def __init__(self, address: Optional[str] = None,
timeout: Optional[float] = TIMEOUT,
router_id: Optional[str] = None,
edge_router_id: Optional[str] = None) -> None:
# 'tester' - can be 'self' when called in a test,
        # or an instance of any class derived from Process (like Qdrouterd)
self._timeout = timeout
self._address = address
self.router_id = router_id
self.edge_router_id = edge_router_id
self.router: List[str] = []
if self.router_id:
self.router = self.router + ['--router', self.router_id]
elif self.edge_router_id:
self.router = self.router + ['--edge-router', self.edge_router_id]
def __call__(self, cmd: str,
address: Optional[str] = None,
input: Optional[str] = None,
timeout: Optional[float] = None) -> str:
addr = address or self._address
assert addr, "address missing"
with subprocess.Popen(['skmanage'] + cmd.split(' ') + self.router
+ ['--bus', addr, '--indent=-1', '--timeout',
str(timeout or self._timeout)], stdin=PIPE,
stdout=PIPE, stderr=STDOUT,
universal_newlines=True) as p:
rc = p.communicate(input)
if p.returncode != 0:
raise Exception("%s %s" % rc)
return rc[0]
def create(self, long_type, kwargs):
cmd = "CREATE --type=%s" % long_type
for k, v in kwargs.items():
cmd += " %s=%s" % (k, v)
return json.loads(self(cmd))
def update(self, long_type, kwargs, name=None, identity=None):
cmd = 'UPDATE --type=%s' % long_type
if identity is not None:
cmd += " --identity=%s" % identity
elif name is not None:
cmd += " --name=%s" % name
for k, v in kwargs.items():
cmd += " %s=%s" % (k, v)
return json.loads(self(cmd))
def delete(self, long_type, name=None, identity=None):
cmd = 'DELETE --type=%s' % long_type
if identity is not None:
cmd += " --identity=%s" % identity
elif name is not None:
cmd += " --name=%s" % name
else:
assert False, "name or identity not supplied!"
self(cmd)
def query(self, long_type):
return json.loads(self('QUERY --type=%s' % long_type))
def get_log(self, limit=None):
cmd = 'GET-LOG'
if (limit):
cmd += " limit=%s" % limit
return json.loads(self(cmd))
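# Illustrative sketch: querying a running router through the skmanage wrapper above.
# The address is a placeholder for one of Qdrouterd.addresses.
def _example_qdmanager_query(router_address="amqp://127.0.0.1:20000"):
    """Return the connection entities reported by the router at `router_address`."""
    qdm = QdManager(address=router_address)
    return qdm.query('io.skupper.router.connection')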
class MgmtMsgProxy:
"""
Utility for creating and inspecting management messages
"""
class _Response:
def __init__(self, status_code, status_description, body):
self.status_code = status_code
self.status_description = status_description
if body.__class__ == dict and len(body.keys()) == 2 and 'attributeNames' in body.keys() and 'results' in body.keys():
results = []
names = body['attributeNames']
for result in body['results']:
result_map = {}
for i in range(len(names)):
result_map[names[i]] = result[i]
results.append(MgmtMsgProxy._Response(status_code, status_description, result_map))
self.attrs = {'results': results}
else:
self.attrs = body
def __getattr__(self, key):
return self.attrs[key]
def __init__(self, reply_addr):
self.reply_addr = reply_addr
def response(self, msg):
ap = msg.properties
return self._Response(ap['statusCode'], ap['statusDescription'], msg.body)
def query_router(self):
ap = {'operation': 'QUERY', 'type': 'io.skupper.router.router'}
return Message(properties=ap, reply_to=self.reply_addr)
def query_connections(self):
ap = {'operation': 'QUERY', 'type': 'io.skupper.router.connection'}
return Message(properties=ap, reply_to=self.reply_addr)
def query_links(self):
ap = {'operation': 'QUERY', 'type': 'io.skupper.router.router.link'}
return Message(properties=ap, reply_to=self.reply_addr)
def query_addresses(self):
ap = {'operation': 'QUERY',
'type': 'io.skupper.router.router.address'}
return Message(properties=ap, reply_to=self.reply_addr)
def create_connector(self, name, **kwargs):
ap = {'operation': 'CREATE',
'type': 'io.skupper.router.connector',
'name': name}
return Message(properties=ap, reply_to=self.reply_addr,
body=kwargs)
def delete_connector(self, name):
ap = {'operation': 'DELETE',
'type': 'io.skupper.router.connector',
'name': name}
return Message(properties=ap, reply_to=self.reply_addr)
class TestTimeout:
"""
A callback object for MessagingHandler class
parent: A MessagingHandler with a timeout() method
"""
__test__ = False
def __init__(self, parent):
self.parent = parent
def on_timer_task(self, event):
self.parent.timeout()
class PollTimeout:
"""
A callback object for MessagingHandler scheduled timers
parent: A MessagingHandler with a poll_timeout() method
"""
def __init__(self, parent):
self.parent = parent
def on_timer_task(self, event):
self.parent.poll_timeout()
def get_link_info(name, address):
"""
Query the router at address for the status and statistics of the named link
"""
qdm = QdManager(address=address)
rc = qdm.query('io.skupper.router.router.link')
for item in rc:
if item.get('name') == name:
return item
return None
def has_mobile_dest_in_address_table(address, dest):
qdm = QdManager(address=address)
rc = qdm.query('io.skupper.router.router.address')
has_dest = False
for item in rc:
if dest in item.get("name"):
has_dest = True
break
return has_dest
def get_inter_router_links(address):
"""
Return a list of all links with type="inter-router
:param address:
"""
inter_router_links = []
qdm = QdManager(address=address)
rc = qdm.query('io.skupper.router.router.link')
for item in rc:
if item.get("linkType") == "inter-router":
inter_router_links.append(item)
return inter_router_links
class Timestamp:
"""
Time stamps for logging.
"""
def __init__(self):
self.ts = datetime.now()
def __str__(self):
return self.ts.strftime("%Y-%m-%d %H:%M:%S.%f")
class Logger:
"""
Record an event log for a self test.
May print per-event or save events to be printed later.
Pytest will automatically collect the logs and will dump them for a failed test
Optional file opened in 'append' mode to which each log line is written.
"""
def __init__(self,
title: str = "Logger",
print_to_console: bool = False,
save_for_dump: bool = True,
python_log_level: Optional[int] = logging.DEBUG,
ofilename: Optional[str] = None) -> None:
self.title = title
self.print_to_console = print_to_console
self.save_for_dump = save_for_dump
self.python_log_level = python_log_level
self.ofilename = ofilename
self.logs: List[Tuple[Timestamp, str]] = []
def log(self, msg):
ts = Timestamp()
if self.save_for_dump:
self.logs.append((ts, msg))
if self.print_to_console:
print("%s %s" % (ts, msg))
sys.stdout.flush()
if self.python_log_level is not None:
logging.log(self.python_log_level, f"{ts} {self.title}: {msg}")
if self.ofilename is not None:
with open(self.ofilename, 'a') as f_out:
f_out.write("%s %s\n" % (ts, msg))
f_out.flush()
def dump(self):
print(self)
sys.stdout.flush()
def __str__(self):
lines = [self.title]
for ts, msg in self.logs:
lines.append("%s %s" % (ts, msg))
res = str('\n'.join(lines))
return res
def curl_available():
"""
Check if the curl command line tool is present on the system.
Return a tuple containing the version if found, otherwise
    return False.
"""
popen_args = ['curl', '--version']
try:
process = Process(popen_args,
name='curl_check',
stdout=PIPE,
expect=None,
universal_newlines=True)
out = process.communicate()[0]
if process.returncode == 0:
# return curl version as a tuple (major, minor[,fix])
# expects --version outputs "curl X.Y.Z ..."
return tuple([int(x) for x in out.split()[1].split('.')])
except:
pass
return False
def run_curl(args, input=None, timeout=TIMEOUT):
"""
Run the curl command with the given argument list.
    Pass optional input to curl's stdin.
Return tuple of (return code, stdout, stderr)
"""
popen_args = ['curl'] + args
if timeout is not None:
popen_args = popen_args + ["--max-time", str(timeout)]
stdin_value = PIPE if input is not None else None
with subprocess.Popen(popen_args, stdin=stdin_value, stdout=PIPE,
stderr=PIPE, universal_newlines=True) as p:
out = p.communicate(input, timeout)
return p.returncode, out[0], out[1]
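# Illustrative sketch: a plain GET through run_curl(); the URL is a placeholder,
# e.g. one of Qdrouterd.http_addresses.
def _example_run_curl(url="http://127.0.0.1:20000/metrics"):
    """Return (returncode, stdout, stderr) for a simple GET against `url`."""
    return run_curl(['-sS', url], timeout=10)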
gdal2tiles.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ******************************************************************************
# $Id$
#
# Project: Google Summer of Code 2007, 2008 (http://code.google.com/soc/)
# Support: BRGM (http://www.brgm.fr)
# Purpose: Convert a raster into TMS (Tile Map Service) tiles in a directory.
# - generate Google Earth metadata (KML SuperOverlay)
# - generate simple HTML viewer based on Google Maps and OpenLayers
# - support of global tiles (Spherical Mercator) for compatibility
# with interactive web maps a la Google Maps
# Author: Klokan Petr Pridal, klokan at klokan dot cz
# Web: http://www.klokan.cz/projects/gdal2tiles/
# GUI: http://www.maptiler.org/
#
###############################################################################
# Copyright (c) 2008, Klokan Petr Pridal
# Copyright (c) 2010-2013, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# ******************************************************************************
from __future__ import print_function, division
import math
from multiprocessing import Pipe, Pool, Process, Manager
import os
import tempfile
import threading
import shutil
import sys
from uuid import uuid4
from xml.etree import ElementTree
from osgeo import gdal
from osgeo import osr
try:
from PIL import Image
import numpy
import osgeo.gdal_array as gdalarray
numpy_available = True
except ImportError:
# 'antialias' resampling is not available
numpy_available = False
__version__ = "$Id$"
resampling_list = ('average', 'near', 'bilinear', 'cubic', 'cubicspline', 'lanczos', 'antialias')
profile_list = ('mercator', 'geodetic', 'raster')
webviewer_list = ('all', 'google', 'openlayers', 'leaflet', 'none')
threadLocal = threading.local()
# =============================================================================
# =============================================================================
# =============================================================================
__doc__globalmaptiles = """
globalmaptiles.py
Global Map Tiles as defined in Tile Map Service (TMS) Profiles
==============================================================
Functions necessary for generation of global tiles used on the web.
It contains classes implementing coordinate conversions for:
- GlobalMercator (based on EPSG:3857)
for Google Maps, Yahoo Maps, Bing Maps compatible tiles
- GlobalGeodetic (based on EPSG:4326)
for OpenLayers Base Map and Google Earth compatible tiles
More info at:
http://wiki.osgeo.org/wiki/Tile_Map_Service_Specification
http://wiki.osgeo.org/wiki/WMS_Tiling_Client_Recommendation
http://msdn.microsoft.com/en-us/library/bb259689.aspx
http://code.google.com/apis/maps/documentation/overlays.html#Google_Maps_Coordinates
Created by Klokan Petr Pridal on 2008-07-03.
Google Summer of Code 2008, project GDAL2Tiles for OSGEO.
In case you use this class in your product, translate it to another language
or find it useful for your project please let me know.
My email: klokan at klokan dot cz.
I would like to know where it was used.
Class is available under the open-source GDAL license (www.gdal.org).
"""
MAXZOOMLEVEL = 32
class GlobalMercator(object):
r"""
TMS Global Mercator Profile
---------------------------
Functions necessary for generation of tiles in Spherical Mercator projection,
EPSG:3857.
Such tiles are compatible with Google Maps, Bing Maps, Yahoo Maps,
UK Ordnance Survey OpenSpace API, ...
and you can overlay them on top of base maps of those web mapping applications.
Pixel and tile coordinates are in TMS notation (origin [0,0] in bottom-left).
What coordinate conversions do we need for TMS Global Mercator tiles::
LatLon <-> Meters <-> Pixels <-> Tile
WGS84 coordinates Spherical Mercator Pixels in pyramid Tiles in pyramid
lat/lon XY in meters XY pixels Z zoom XYZ from TMS
        EPSG:4326           EPSG:3857
.----. --------- -- TMS
/ \ <-> | | <-> /----/ <-> Google
\ / | | /--------/ QuadTree
----- --------- /------------/
KML, public WebMapService Web Clients TileMapService
What is the coordinate extent of Earth in EPSG:3857?
[-20037508.342789244, -20037508.342789244, 20037508.342789244, 20037508.342789244]
      The constant 20037508.342789244 is half the Earth's circumference in meters
      (the full extent spans roughly 40 thousand kilometers); the coordinate origin is in the middle of the extent.
In fact you can calculate the constant as: 2 * math.pi * 6378137 / 2.0
$ echo 180 85 | gdaltransform -s_srs EPSG:4326 -t_srs EPSG:3857
      Polar areas with abs(latitude) bigger than 85.05112878 are clipped off.
What are zoom level constants (pixels/meter) for pyramid with EPSG:3857?
whole region is on top of pyramid (zoom=0) covered by 256x256 pixels tile,
every lower zoom level resolution is always divided by two
initialResolution = 20037508.342789244 * 2 / 256 = 156543.03392804062
What is the difference between TMS and Google Maps/QuadTree tile name convention?
The tile raster itself is the same (equal extent, projection, pixel size),
there is just different identification of the same raster tile.
Tiles in TMS are counted from [0,0] in the bottom-left corner, id is XYZ.
Google placed the origin [0,0] to the top-left corner, reference is XYZ.
Microsoft is referencing tiles by a QuadTree name, defined on the website:
http://msdn2.microsoft.com/en-us/library/bb259689.aspx
The lat/lon coordinates are using WGS84 datum, yes?
Yes, all lat/lon we are mentioning should use WGS84 Geodetic Datum.
Well, the web clients like Google Maps are projecting those coordinates by
      Spherical Mercator, so in fact lat/lon coordinates on the sphere are treated as if
      they were on the WGS84 ellipsoid.
From MSDN documentation:
To simplify the calculations, we use the spherical form of projection, not
the ellipsoidal form. Since the projection is used only for map display,
and not for displaying numeric coordinates, we don't need the extra precision
of an ellipsoidal projection. The spherical projection causes approximately
0.33 percent scale distortion in the Y direction, which is not visually
noticeable.
How do I create a raster in EPSG:3857 and convert coordinates with PROJ.4?
You can use standard GIS tools like gdalwarp, cs2cs or gdaltransform.
      All of these tools support -t_srs 'epsg:3857'.
For other GIS programs check the exact definition of the projection:
More info at http://spatialreference.org/ref/user/google-projection/
The same projection is designated as EPSG:3857. WKT definition is in the
official EPSG database.
Proj4 Text:
+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0
+k=1.0 +units=m +nadgrids=@null +no_defs
Human readable WKT format of EPSG:3857:
PROJCS["Google Maps Global Mercator",
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0],
UNIT["degree",0.0174532925199433],
AUTHORITY["EPSG","4326"]],
PROJECTION["Mercator_1SP"],
PARAMETER["central_meridian",0],
PARAMETER["scale_factor",1],
PARAMETER["false_easting",0],
PARAMETER["false_northing",0],
UNIT["metre",1,
AUTHORITY["EPSG","9001"]]]
"""
def __init__(self, tile_size=256):
"Initialize the TMS Global Mercator pyramid"
self.tile_size = tile_size
self.initialResolution = 2 * math.pi * 6378137 / self.tile_size
# 156543.03392804062 for tile_size 256 pixels
self.originShift = 2 * math.pi * 6378137 / 2.0
# 20037508.342789244
def LatLonToMeters(self, lat, lon):
"Converts given lat/lon in WGS84 Datum to XY in Spherical Mercator EPSG:3857"
mx = lon * self.originShift / 180.0
my = math.log(math.tan((90 + lat) * math.pi / 360.0)) / (math.pi / 180.0)
my = my * self.originShift / 180.0
return mx, my
def MetersToLatLon(self, mx, my):
"Converts XY point from Spherical Mercator EPSG:3857 to lat/lon in WGS84 Datum"
lon = (mx / self.originShift) * 180.0
lat = (my / self.originShift) * 180.0
lat = 180 / math.pi * (2 * math.atan(math.exp(lat * math.pi / 180.0)) - math.pi / 2.0)
return lat, lon
def PixelsToMeters(self, px, py, zoom):
"Converts pixel coordinates in given zoom level of pyramid to EPSG:3857"
res = self.Resolution(zoom)
mx = px * res - self.originShift
my = py * res - self.originShift
return mx, my
def MetersToPixels(self, mx, my, zoom):
"Converts EPSG:3857 to pyramid pixel coordinates in given zoom level"
res = self.Resolution(zoom)
px = (mx + self.originShift) / res
py = (my + self.originShift) / res
return px, py
def PixelsToTile(self, px, py):
"Returns a tile covering region in given pixel coordinates"
tx = int(math.ceil(px / float(self.tile_size)) - 1)
ty = int(math.ceil(py / float(self.tile_size)) - 1)
return tx, ty
def PixelsToRaster(self, px, py, zoom):
"Move the origin of pixel coordinates to top-left corner"
mapSize = self.tile_size << zoom
return px, mapSize - py
def MetersToTile(self, mx, my, zoom):
"Returns tile for given mercator coordinates"
px, py = self.MetersToPixels(mx, my, zoom)
return self.PixelsToTile(px, py)
def TileBounds(self, tx, ty, zoom):
"Returns bounds of the given tile in EPSG:3857 coordinates"
minx, miny = self.PixelsToMeters(tx * self.tile_size, ty * self.tile_size, zoom)
maxx, maxy = self.PixelsToMeters((tx + 1) * self.tile_size, (ty + 1) * self.tile_size, zoom)
return (minx, miny, maxx, maxy)
def TileLatLonBounds(self, tx, ty, zoom):
"Returns bounds of the given tile in latitude/longitude using WGS84 datum"
bounds = self.TileBounds(tx, ty, zoom)
minLat, minLon = self.MetersToLatLon(bounds[0], bounds[1])
maxLat, maxLon = self.MetersToLatLon(bounds[2], bounds[3])
return (minLat, minLon, maxLat, maxLon)
def Resolution(self, zoom):
"Resolution (meters/pixel) for given zoom level (measured at Equator)"
# return (2 * math.pi * 6378137) / (self.tile_size * 2**zoom)
return self.initialResolution / (2**zoom)
def ZoomForPixelSize(self, pixelSize):
"Maximal scaledown zoom of the pyramid closest to the pixelSize."
for i in range(MAXZOOMLEVEL):
if pixelSize > self.Resolution(i):
return max(0, i - 1) # We don't want to scale up
return MAXZOOMLEVEL - 1
def GoogleTile(self, tx, ty, zoom):
"Converts TMS tile coordinates to Google Tile coordinates"
# coordinate origin is moved from bottom-left to top-left corner of the extent
return tx, (2**zoom - 1) - ty
def QuadTree(self, tx, ty, zoom):
"Converts TMS tile coordinates to Microsoft QuadTree"
quadKey = ""
ty = (2**zoom - 1) - ty
for i in range(zoom, 0, -1):
digit = 0
mask = 1 << (i - 1)
if (tx & mask) != 0:
digit += 1
if (ty & mask) != 0:
digit += 2
quadKey += str(digit)
return quadKey
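# Illustrative usage sketch, not part of the original gdal2tiles module: it chains
# the GlobalMercator conversions above to go from a WGS84 coordinate to a TMS tile,
# its Google/XYZ tile index and its Microsoft QuadTree key. The sample coordinate
# and zoom level are arbitrary and the helper is never called by the module itself.
def _example_global_mercator_usage(lat=50.0755, lon=14.4378, zoom=10):
    mercator = GlobalMercator()
    # WGS84 lat/lon -> EPSG:3857 meters
    mx, my = mercator.LatLonToMeters(lat, lon)
    # meters -> TMS tile coordinates at the given zoom level
    tx, ty = mercator.MetersToTile(mx, my, zoom)
    # TMS tile -> Google/XYZ tile (y origin moved from bottom-left to top-left)
    gx, gy = mercator.GoogleTile(tx, ty, zoom)
    # TMS tile -> QuadTree key, one digit per zoom level
    quadkey = mercator.QuadTree(tx, ty, zoom)
    return (mx, my), (tx, ty), (gx, gy), quadkey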
class GlobalGeodetic(object):
r"""
TMS Global Geodetic Profile
---------------------------
Functions necessary for generation of global tiles in Plate Carre projection,
EPSG:4326, "unprojected profile".
Such tiles are compatible with Google Earth (as any other EPSG:4326 rasters)
and you can overlay the tiles on top of OpenLayers base map.
Pixel and tile coordinates are in TMS notation (origin [0,0] in bottom-left).
What coordinate conversions do we need for TMS Global Geodetic tiles?
Global Geodetic tiles use geodetic coordinates (latitude, longitude) directly
as planar XY coordinates (also called Unprojected or Plate Carree). We only
need scaling to the pixel pyramid and cutting into tiles.
The pyramid has two tiles at its top level, so it is a rectangle rather than a
square: the area [-180,-90,180,90] is scaled to 512x256 pixels.
TMS has coordinate origin (for pixels and tiles) in bottom-left corner.
Rasters are in EPSG:4326 and therefore are compatible with Google Earth.
LatLon <-> Pixels <-> Tiles
WGS84 coordinates Pixels in pyramid Tiles in pyramid
lat/lon XY pixels Z zoom XYZ from TMS
EPSG:4326
.----. ----
/ \ <-> /--------/ <-> TMS
\ / /--------------/
----- /--------------------/
WMS, KML Web Clients, Google Earth TileMapService
"""
def __init__(self, tmscompatible, tile_size=256):
self.tile_size = tile_size
if tmscompatible is not None:
# Defaults the resolution factor to 0.703125 (2 tiles @ level 0)
# Adheres to the OSGeo TMS spec
# http://wiki.osgeo.org/wiki/Tile_Map_Service_Specification#global-geodetic
self.resFact = 180.0 / self.tile_size
else:
# Defaults the resolution factor to 1.40625 (1 tile @ level 0)
# Adheres to the default WMTS resolution used by OpenLayers, MapProxy, etc.
self.resFact = 360.0 / self.tile_size
def LonLatToPixels(self, lon, lat, zoom):
"Converts lon/lat to pixel coordinates in given zoom of the EPSG:4326 pyramid"
res = self.resFact / 2**zoom
px = (180 + lon) / res
py = (90 + lat) / res
return px, py
def PixelsToTile(self, px, py):
"Returns coordinates of the tile covering region in pixel coordinates"
tx = int(math.ceil(px / float(self.tile_size)) - 1)
ty = int(math.ceil(py / float(self.tile_size)) - 1)
return tx, ty
def LonLatToTile(self, lon, lat, zoom):
"Returns the tile for zoom which covers given lon/lat coordinates"
px, py = self.LonLatToPixels(lon, lat, zoom)
return self.PixelsToTile(px, py)
def Resolution(self, zoom):
"Resolution (arc/pixel) for given zoom level (measured at Equator)"
return self.resFact / 2**zoom
def ZoomForPixelSize(self, pixelSize):
"Maximal scaledown zoom of the pyramid closest to the pixelSize."
for i in range(MAXZOOMLEVEL):
if pixelSize > self.Resolution(i):
return max(0, i - 1) # We don't want to scale up
return MAXZOOMLEVEL - 1
def TileBounds(self, tx, ty, zoom):
"Returns bounds of the given tile"
res = self.resFact / 2**zoom
return (
tx * self.tile_size * res - 180,
ty * self.tile_size * res - 90,
(tx + 1) * self.tile_size * res - 180,
(ty + 1) * self.tile_size * res - 90
)
def TileLatLonBounds(self, tx, ty, zoom):
"Returns bounds of the given tile in the SWNE form"
b = self.TileBounds(tx, ty, zoom)
return (b[1], b[0], b[3], b[2])
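# Illustrative usage sketch, not part of the original gdal2tiles module: with
# tmscompatible set (anything but None) zoom level 0 holds two 180-degree tiles,
# as described in the class docstring; passing None selects the single-tile
# OpenLayers/WMTS layout instead. The sample coordinate and zoom are arbitrary.
def _example_global_geodetic_usage(lon=14.4378, lat=50.0755, zoom=5):
    geodetic = GlobalGeodetic(tmscompatible=True)
    tx, ty = geodetic.LonLatToTile(lon, lat, zoom)
    # TileBounds() returns (minLon, minLat, maxLon, maxLat) in EPSG:4326 ...
    bounds = geodetic.TileBounds(tx, ty, zoom)
    # ... and TileLatLonBounds() reorders that to the SWNE form
    swne = geodetic.TileLatLonBounds(tx, ty, zoom)
    return (tx, ty), bounds, swne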
class Zoomify(object):
"""
Tiles compatible with the Zoomify viewer
----------------------------------------
"""
def __init__(self, width, height, tile_size=256, tileformat='jpg'):
"""Initialization of the Zoomify tile tree"""
self.tile_size = tile_size
self.tileformat = tileformat
imagesize = (width, height)
tiles = (math.ceil(width / tile_size), math.ceil(height / tile_size))
# Size (in tiles) for each tier of pyramid.
self.tierSizeInTiles = []
self.tierSizeInTiles.append(tiles)
# Image size in pixels for each pyramid tier
self.tierImageSize = []
self.tierImageSize.append(imagesize)
while (imagesize[0] > tile_size or imagesize[1] > tile_size):
imagesize = (math.floor(imagesize[0] / 2), math.floor(imagesize[1] / 2))
tiles = (math.ceil(imagesize[0] / tile_size), math.ceil(imagesize[1] / tile_size))
self.tierSizeInTiles.append(tiles)
self.tierImageSize.append(imagesize)
self.tierSizeInTiles.reverse()
self.tierImageSize.reverse()
# Depth of the Zoomify pyramid, number of tiers (zoom levels)
self.numberOfTiers = len(self.tierSizeInTiles)
# Number of tiles up to the given tier of pyramid.
self.tileCountUpToTier = [0]
for i in range(1, self.numberOfTiers + 1):
self.tileCountUpToTier.append(
self.tierSizeInTiles[i - 1][0] * self.tierSizeInTiles[i - 1][1] +
self.tileCountUpToTier[i - 1]
)
def tilefilename(self, x, y, z):
"""Returns filename for tile with given coordinates"""
tileIndex = x + y * self.tierSizeInTiles[z][0] + self.tileCountUpToTier[z]
return os.path.join("TileGroup%.0f" % math.floor(tileIndex / 256),
"%s-%s-%s.%s" % (z, x, y, self.tileformat))
class GDALError(Exception):
pass
def exit_with_error(message, details=""):
# Message printing and exit code kept from the way it worked using the OptionParser (in case
# someone parses the error output)
sys.stderr.write("Usage: gdal2tiles.py [options] input_file [output]\n\n")
sys.stderr.write("gdal2tiles.py: error: %s\n" % message)
if details:
sys.stderr.write("\n\n%s\n" % details)
sys.exit(2)
def generate_kml(tx, ty, tz, tileext, tile_size, tileswne, options, children=None, **args):
"""
Template for the KML. Returns filled string.
"""
if not children:
children = []
args['tx'], args['ty'], args['tz'] = tx, ty, tz
args['tileformat'] = tileext
if 'tile_size' not in args:
args['tile_size'] = tile_size
if 'minlodpixels' not in args:
args['minlodpixels'] = int(args['tile_size'] / 2)
if 'maxlodpixels' not in args:
args['maxlodpixels'] = int(args['tile_size'] * 8)
if children == []:
args['maxlodpixels'] = -1
if tx is None:
tilekml = False
args['title'] = options.title
else:
tilekml = True
args['title'] = "%d/%d/%d.kml" % (tz, tx, ty)
args['south'], args['west'], args['north'], args['east'] = tileswne(tx, ty, tz)
if tx == 0:
args['drawOrder'] = 2 * tz + 1
elif tx is not None:
args['drawOrder'] = 2 * tz
else:
args['drawOrder'] = 0
url = options.url
if not url:
if tilekml:
url = "../../"
else:
url = ""
s = """<?xml version="1.0" encoding="utf-8"?>
<kml xmlns="http://www.opengis.net/kml/2.2">
<Document>
<name>%(title)s</name>
<description></description>
<Style>
<ListStyle id="hideChildren">
<listItemType>checkHideChildren</listItemType>
</ListStyle>
</Style>""" % args
if tilekml:
s += """
<Region>
<LatLonAltBox>
<north>%(north).14f</north>
<south>%(south).14f</south>
<east>%(east).14f</east>
<west>%(west).14f</west>
</LatLonAltBox>
<Lod>
<minLodPixels>%(minlodpixels)d</minLodPixels>
<maxLodPixels>%(maxlodpixels)d</maxLodPixels>
</Lod>
</Region>
<GroundOverlay>
<drawOrder>%(drawOrder)d</drawOrder>
<Icon>
<href>%(ty)d.%(tileformat)s</href>
</Icon>
<LatLonBox>
<north>%(north).14f</north>
<south>%(south).14f</south>
<east>%(east).14f</east>
<west>%(west).14f</west>
</LatLonBox>
</GroundOverlay>
""" % args
for cx, cy, cz in children:
csouth, cwest, cnorth, ceast = tileswne(cx, cy, cz)
s += """
<NetworkLink>
<name>%d/%d/%d.%s</name>
<Region>
<LatLonAltBox>
<north>%.14f</north>
<south>%.14f</south>
<east>%.14f</east>
<west>%.14f</west>
</LatLonAltBox>
<Lod>
<minLodPixels>%d</minLodPixels>
<maxLodPixels>-1</maxLodPixels>
</Lod>
</Region>
<Link>
<href>%s%d/%d/%d.kml</href>
<viewRefreshMode>onRegion</viewRefreshMode>
<viewFormat/>
</Link>
</NetworkLink>
""" % (cz, cx, cy, args['tileformat'], cnorth, csouth, ceast, cwest,
args['minlodpixels'], url, cz, cx, cy)
s += """ </Document>
</kml>
"""
return s
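# Illustrative usage sketch, not part of the original gdal2tiles module:
# generate_kml() only reads options.title and options.url, so a minimal stand-in
# options object is enough to render a single-tile KML string. The bounds returned
# by the tileswne callback below are hypothetical.
def _example_generate_kml():
    class _Options(object):
        title = 'demo'
        url = ''
    def _tileswne(tx, ty, tz):
        # south, west, north, east in WGS84 (hypothetical values)
        return (49.0, 13.0, 50.0, 14.0)
    return generate_kml(0, 0, 1, 'png', 256, _tileswne, _Options())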
def scale_query_to_tile(dsquery, dstile, tiledriver, options, tilefilename=''):
"""Scales down query dataset to the tile dataset"""
querysize = dsquery.RasterXSize
tile_size = dstile.RasterXSize
tilebands = dstile.RasterCount
if options.resampling == 'average':
# Function: gdal.RegenerateOverview()
for i in range(1, tilebands + 1):
# Black border around NODATA
res = gdal.RegenerateOverview(dsquery.GetRasterBand(i), dstile.GetRasterBand(i),
'average')
if res != 0:
exit_with_error("RegenerateOverview() failed on %s, error %d" % (
tilefilename, res))
elif options.resampling == 'antialias' and numpy_available:
# Scaling by PIL (Python Imaging Library) - improved Lanczos
array = numpy.zeros((querysize, querysize, tilebands), numpy.uint8)
for i in range(tilebands):
array[:, :, i] = gdalarray.BandReadAsArray(dsquery.GetRasterBand(i + 1),
0, 0, querysize, querysize)
im = Image.fromarray(array, 'RGBA') # Always four bands
im1 = im.resize((tile_size, tile_size), Image.ANTIALIAS)
if os.path.exists(tilefilename):
im0 = Image.open(tilefilename)
im1 = Image.composite(im1, im0, im1)
im1.save(tilefilename, tiledriver)
else:
if options.resampling == 'near':
gdal_resampling = gdal.GRA_NearestNeighbour
elif options.resampling == 'bilinear':
gdal_resampling = gdal.GRA_Bilinear
elif options.resampling == 'cubic':
gdal_resampling = gdal.GRA_Cubic
elif options.resampling == 'cubicspline':
gdal_resampling = gdal.GRA_CubicSpline
elif options.resampling == 'lanczos':
gdal_resampling = gdal.GRA_Lanczos
# Other algorithms are implemented by gdal.ReprojectImage().
dsquery.SetGeoTransform((0.0, tile_size / float(querysize), 0.0, 0.0, 0.0,
tile_size / float(querysize)))
dstile.SetGeoTransform((0.0, 1.0, 0.0, 0.0, 0.0, 1.0))
res = gdal.ReprojectImage(dsquery, dstile, None, None, gdal_resampling)
if res != 0:
exit_with_error("ReprojectImage() failed on %s, error %d" % (tilefilename, res))
def setup_no_data_values(input_dataset, options):
"""
Extract the NODATA values from the dataset or use the passed arguments as override if any
"""
in_nodata = []
if options.srcnodata:
nds = list(map(float, options.srcnodata.split(',')))
if len(nds) < input_dataset.RasterCount:
in_nodata = (nds * input_dataset.RasterCount)[:input_dataset.RasterCount]
else:
in_nodata = nds
else:
for i in range(1, input_dataset.RasterCount + 1):
raster_no_data = input_dataset.GetRasterBand(i).GetNoDataValue()
if raster_no_data is not None:
in_nodata.append(raster_no_data)
if options.verbose:
print("NODATA: %s" % in_nodata)
return in_nodata
def setup_input_srs(input_dataset, options):
"""
Determines and returns the input Spatial Reference System (SRS) as an osr object and as a
WKT representation.
The SRS passed on the command line takes priority; if none is given, it is extracted
from the input dataset.
"""
input_srs = None
input_srs_wkt = None
if options.s_srs:
input_srs = osr.SpatialReference()
input_srs.SetFromUserInput(options.s_srs)
input_srs_wkt = input_srs.ExportToWkt()
else:
input_srs_wkt = input_dataset.GetProjection()
if not input_srs_wkt and input_dataset.GetGCPCount() != 0:
input_srs_wkt = input_dataset.GetGCPProjection()
if input_srs_wkt:
input_srs = osr.SpatialReference()
input_srs.ImportFromWkt(input_srs_wkt)
input_srs.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
return input_srs, input_srs_wkt
def setup_output_srs(input_srs, options):
"""
Set up the desired output SRS (based on options)
"""
output_srs = osr.SpatialReference()
output_srs.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
if options.profile == 'mercator':
output_srs.ImportFromEPSG(3857)
elif options.profile == 'geodetic':
output_srs.ImportFromEPSG(4326)
else:
output_srs = input_srs
return output_srs
def has_georeference(dataset):
return (dataset.GetGeoTransform() != (0.0, 1.0, 0.0, 0.0, 0.0, 1.0) or
dataset.GetGCPCount() != 0)
def reproject_dataset(from_dataset, from_srs, to_srs, options=None):
"""
Returns the input dataset in the expected "destination" SRS.
If the dataset is already in the correct SRS, returns it unmodified
"""
if not from_srs or not to_srs:
raise GDALError("from and to SRS must be defined to reproject the dataset")
if (from_srs.ExportToProj4() != to_srs.ExportToProj4()) or (from_dataset.GetGCPCount() != 0):
to_dataset = gdal.AutoCreateWarpedVRT(from_dataset,
from_srs.ExportToWkt(), to_srs.ExportToWkt())
if options and options.verbose:
print("Warping of the raster by AutoCreateWarpedVRT (result saved into 'tiles.vrt')")
to_dataset.GetDriver().CreateCopy("tiles.vrt", to_dataset)
return to_dataset
else:
return from_dataset
def add_gdal_warp_options_to_string(vrt_string, warp_options):
if not warp_options:
return vrt_string
vrt_root = ElementTree.fromstring(vrt_string)
options = vrt_root.find("GDALWarpOptions")
if options is None:
return vrt_string
for key, value in warp_options.items():
tb = ElementTree.TreeBuilder()
tb.start("Option", {"name": key})
tb.data(value)
tb.end("Option")
elem = tb.close()
options.insert(0, elem)
return ElementTree.tostring(vrt_root).decode()
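# Illustrative sketch, not part of the original gdal2tiles module:
# add_gdal_warp_options_to_string() injects <Option> elements into the
# GDALWarpOptions node of a warped-VRT XML string. The minimal VRT fragment
# below is a hypothetical stand-in, just large enough for the helper to work on.
def _example_add_warp_options():
    minimal_vrt = (
        '<VRTDataset rasterXSize="1" rasterYSize="1">'
        '<GDALWarpOptions></GDALWarpOptions>'
        '</VRTDataset>'
    )
    # Returns the XML with e.g. <Option name="INIT_DEST">NO_DATA</Option> added
    # as the first children of GDALWarpOptions
    return add_gdal_warp_options_to_string(
        minimal_vrt, {"INIT_DEST": "NO_DATA", "UNIFIED_SRC_NODATA": "YES"})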
def update_no_data_values(warped_vrt_dataset, nodata_values, options=None):
"""
Takes an array of NODATA values and forces them on the WarpedVRT file dataset passed
"""
# TODO: gbataille - Seems that I forgot tests there
assert nodata_values != []
vrt_string = warped_vrt_dataset.GetMetadata("xml:VRT")[0]
vrt_string = add_gdal_warp_options_to_string(
vrt_string, {"INIT_DEST": "NO_DATA", "UNIFIED_SRC_NODATA": "YES"})
# TODO: gbataille - check the need for this replacement. Seems to work without
# # replace BandMapping tag for NODATA bands....
# for i in range(len(nodata_values)):
# s = s.replace(
# '<BandMapping src="%i" dst="%i"/>' % ((i+1), (i+1)),
# """
# <BandMapping src="%i" dst="%i">
# <SrcNoDataReal>%i</SrcNoDataReal>
# <SrcNoDataImag>0</SrcNoDataImag>
# <DstNoDataReal>%i</DstNoDataReal>
# <DstNoDataImag>0</DstNoDataImag>
# </BandMapping>
# """ % ((i+1), (i+1), nodata_values[i], nodata_values[i]))
corrected_dataset = gdal.Open(vrt_string)
# set NODATA_VALUE metadata
corrected_dataset.SetMetadataItem(
'NODATA_VALUES', ' '.join([str(i) for i in nodata_values]))
if options and options.verbose:
print("Modified warping result saved into 'tiles1.vrt'")
with open("tiles1.vrt", "w") as f:
f.write(corrected_dataset.GetMetadata("xml:VRT")[0])
return corrected_dataset
def add_alpha_band_to_string_vrt(vrt_string):
# TODO: gbataille - Old code speaks of this being equivalent to gdalwarp -dstalpha
# To be checked
vrt_root = ElementTree.fromstring(vrt_string)
index = 0
nb_bands = 0
for subelem in list(vrt_root):
if subelem.tag == "VRTRasterBand":
nb_bands += 1
color_node = subelem.find("./ColorInterp")
if color_node is not None and color_node.text == "Alpha":
raise Exception("Alpha band already present")
else:
if nb_bands:
# This means that we are one element after the Band definitions
break
index += 1
tb = ElementTree.TreeBuilder()
tb.start("VRTRasterBand",
{'dataType': "Byte", "band": str(nb_bands + 1), "subClass": "VRTWarpedRasterBand"})
tb.start("ColorInterp", {})
tb.data("Alpha")
tb.end("ColorInterp")
tb.end("VRTRasterBand")
elem = tb.close()
vrt_root.insert(index, elem)
warp_options = vrt_root.find(".//GDALWarpOptions")
tb = ElementTree.TreeBuilder()
tb.start("DstAlphaBand", {})
tb.data(str(nb_bands + 1))
tb.end("DstAlphaBand")
elem = tb.close()
warp_options.append(elem)
# TODO: gbataille - this is a GDALWarpOptions. Why put it in a specific place?
tb = ElementTree.TreeBuilder()
tb.start("Option", {"name": "INIT_DEST"})
tb.data("0")
tb.end("Option")
elem = tb.close()
warp_options.append(elem)
return ElementTree.tostring(vrt_root).decode()
def update_alpha_value_for_non_alpha_inputs(warped_vrt_dataset, options=None):
"""
Handles datasets with 1 or 3 bands, i.e. without an alpha channel, in case the nodata
value has not been forced by options
"""
if warped_vrt_dataset.RasterCount in [1, 3]:
vrt_string = warped_vrt_dataset.GetMetadata("xml:VRT")[0]
vrt_string = add_alpha_band_to_string_vrt(vrt_string)
warped_vrt_dataset = gdal.Open(vrt_string)
if options and options.verbose:
print("Modified -dstalpha warping result saved into 'tiles1.vrt'")
with open("tiles1.vrt", "w") as f:
f.write(warped_vrt_dataset.GetMetadata("xml:VRT")[0])
return warped_vrt_dataset
def nb_data_bands(dataset):
"""
Return the number of data (non-alpha) bands of a gdal dataset
"""
alphaband = dataset.GetRasterBand(1).GetMaskBand()
if ((alphaband.GetMaskFlags() & gdal.GMF_ALPHA) or
dataset.RasterCount == 4 or
dataset.RasterCount == 2):
return dataset.RasterCount - 1
return dataset.RasterCount
def create_base_tile(tile_job_info, tile_detail, queue=None):
dataBandsCount = tile_job_info.nb_data_bands
output = tile_job_info.output_file_path
tileext = tile_job_info.tile_extension
tile_size = tile_job_info.tile_size
options = tile_job_info.options
tilebands = dataBandsCount + 1
cached_ds = getattr(threadLocal, 'cached_ds', None)
if cached_ds and cached_ds.GetDescription() == tile_job_info.src_file:
ds = cached_ds
else:
ds = gdal.Open(tile_job_info.src_file, gdal.GA_ReadOnly)
threadLocal.cached_ds = ds
mem_drv = gdal.GetDriverByName('MEM')
out_drv = gdal.GetDriverByName(tile_job_info.tile_driver)
alphaband = ds.GetRasterBand(1).GetMaskBand()
tx = tile_detail.tx
ty = tile_detail.ty
tz = tile_detail.tz
rx = tile_detail.rx
ry = tile_detail.ry
rxsize = tile_detail.rxsize
rysize = tile_detail.rysize
wx = tile_detail.wx
wy = tile_detail.wy
wxsize = tile_detail.wxsize
wysize = tile_detail.wysize
querysize = tile_detail.querysize
# Tile dataset in memory
tilefilename = os.path.join(
output, str(tz), str(tx), "%s.%s" % (ty, tileext))
dstile = mem_drv.Create('', tile_size, tile_size, tilebands)
data = alpha = None
if options.verbose:
print("\tReadRaster Extent: ",
(rx, ry, rxsize, rysize), (wx, wy, wxsize, wysize))
# The query is read with 'nearest neighbour' but can be bigger than the tile_size.
# We scale the query down to the tile_size with the supplied algorithm.
if rxsize != 0 and rysize != 0 and wxsize != 0 and wysize != 0:
alpha = alphaband.ReadRaster(rx, ry, rxsize, rysize, wxsize, wysize)
# Detect totally transparent tile and skip its creation
if tile_job_info.exclude_transparent and len(alpha) == alpha.count('\x00'.encode('ascii')):
return
data = ds.ReadRaster(rx, ry, rxsize, rysize, wxsize, wysize,
band_list=list(range(1, dataBandsCount + 1)))
# The tile in memory is a transparent file by default. Write pixel values into it if
# any
if data:
if tile_size == querysize:
# Use the ReadRaster result directly in tiles ('nearest neighbour' query)
dstile.WriteRaster(wx, wy, wxsize, wysize, data,
band_list=list(range(1, dataBandsCount + 1)))
dstile.WriteRaster(wx, wy, wxsize, wysize, alpha, band_list=[tilebands])
# Note: For source drivers based on WaveLet compression (JPEG2000, ECW,
# MrSID) the ReadRaster function returns high-quality raster (not ugly
# nearest neighbour)
# TODO: Use directly 'near' for WaveLet files
else:
# Big ReadRaster query in memory scaled to the tile_size - all but 'near'
# algo
dsquery = mem_drv.Create('', querysize, querysize, tilebands)
# TODO: fill the null value in case a tile without alpha is produced (now
# only png tiles are supported)
dsquery.WriteRaster(wx, wy, wxsize, wysize, data,
band_list=list(range(1, dataBandsCount + 1)))
dsquery.WriteRaster(wx, wy, wxsize, wysize, alpha, band_list=[tilebands])
scale_query_to_tile(dsquery, dstile, tile_job_info.tile_driver, options,
tilefilename=tilefilename)
del dsquery
del data
if options.resampling != 'antialias':
# Write a copy of tile to png/jpg
out_drv.CreateCopy(tilefilename, dstile, strict=0)
del dstile
# Create a KML file for this tile.
if tile_job_info.kml:
kmlfilename = os.path.join(output, str(tz), str(tx), '%d.kml' % ty)
if not options.resume or not os.path.exists(kmlfilename):
with open(kmlfilename, 'wb') as f:
f.write(generate_kml(
tx, ty, tz, tile_job_info.tile_extension, tile_job_info.tile_size,
get_tile_swne(tile_job_info, options), tile_job_info.options
).encode('utf-8'))
if queue:
queue.put("tile %s %s %s" % (tx, ty, tz))
def create_overview_tiles(tile_job_info, output_folder, options):
"""Generation of the overview tiles (higher in the pyramid) based on existing tiles"""
mem_driver = gdal.GetDriverByName('MEM')
tile_driver = tile_job_info.tile_driver
out_driver = gdal.GetDriverByName(tile_driver)
tilebands = tile_job_info.nb_data_bands + 1
# Usage of existing tiles: from four underlying tiles generate one overview tile.
tcount = 0
for tz in range(tile_job_info.tmaxz - 1, tile_job_info.tminz - 1, -1):
tminx, tminy, tmaxx, tmaxy = tile_job_info.tminmax[tz]
tcount += (1 + abs(tmaxx - tminx)) * (1 + abs(tmaxy - tminy))
ti = 0
if tcount == 0:
return
if not options.quiet:
print("Generating Overview Tiles:")
progress_bar = ProgressBar(tcount)
progress_bar.start()
for tz in range(tile_job_info.tmaxz - 1, tile_job_info.tminz - 1, -1):
tminx, tminy, tmaxx, tmaxy = tile_job_info.tminmax[tz]
for ty in range(tmaxy, tminy - 1, -1):
for tx in range(tminx, tmaxx + 1):
ti += 1
tilefilename = os.path.join(output_folder,
str(tz),
str(tx),
"%s.%s" % (ty, tile_job_info.tile_extension))
if options.verbose:
print(ti, '/', tcount, tilefilename)
if options.resume and os.path.exists(tilefilename):
if options.verbose:
print("Tile generation skipped because of --resume")
else:
progress_bar.log_progress()
continue
# Create directories for the tile
if not os.path.exists(os.path.dirname(tilefilename)):
os.makedirs(os.path.dirname(tilefilename))
dsquery = mem_driver.Create('', 2 * tile_job_info.tile_size,
2 * tile_job_info.tile_size, tilebands)
# TODO: fill the null value
dstile = mem_driver.Create('', tile_job_info.tile_size, tile_job_info.tile_size,
tilebands)
# TODO: Implement more clever walking on the tiles with cache functionality
# probably walk should start with reading of four tiles from top left corner
# Hilbert curve
children = []
# Read the tiles and write them to query window
for y in range(2 * ty, 2 * ty + 2):
for x in range(2 * tx, 2 * tx + 2):
minx, miny, maxx, maxy = tile_job_info.tminmax[tz + 1]
if x >= minx and x <= maxx and y >= miny and y <= maxy:
base_tile_path = os.path.join(output_folder, str(tz + 1), str(x),
"%s.%s" % (y, tile_job_info.tile_extension))
if not os.path.isfile(base_tile_path):
continue
dsquerytile = gdal.Open(
base_tile_path,
gdal.GA_ReadOnly)
if (ty == 0 and y == 1) or (ty != 0 and (y % (2 * ty)) != 0):
tileposy = 0
else:
tileposy = tile_job_info.tile_size
if tx:
tileposx = x % (2 * tx) * tile_job_info.tile_size
elif tx == 0 and x == 1:
tileposx = tile_job_info.tile_size
else:
tileposx = 0
dsquery.WriteRaster(
tileposx, tileposy, tile_job_info.tile_size,
tile_job_info.tile_size,
dsquerytile.ReadRaster(0, 0,
tile_job_info.tile_size,
tile_job_info.tile_size),
band_list=list(range(1, tilebands + 1)))
children.append([x, y, tz + 1])
if children:
scale_query_to_tile(dsquery, dstile, tile_driver, options,
tilefilename=tilefilename)
# Write a copy of tile to png/jpg
if options.resampling != 'antialias':
# Write a copy of tile to png/jpg
out_driver.CreateCopy(tilefilename, dstile, strict=0)
if options.verbose:
print("\tbuild from zoom", tz + 1,
" tiles:", (2 * tx, 2 * ty), (2 * tx + 1, 2 * ty),
(2 * tx, 2 * ty + 1), (2 * tx + 1, 2 * ty + 1))
# Create a KML file for this tile.
if tile_job_info.kml:
with open(os.path.join(
output_folder,
'%d/%d/%d.kml' % (tz, tx, ty)
), 'wb') as f:
f.write(generate_kml(
tx, ty, tz, tile_job_info.tile_extension, tile_job_info.tile_size,
get_tile_swne(tile_job_info, options), options, children
).encode('utf-8'))
if not options.verbose and not options.quiet:
progress_bar.log_progress()
def optparse_init():
"""Prepare the option parser for input (argv)"""
from optparse import OptionParser, OptionGroup
usage = "Usage: %prog [options] input_file [output]"
p = OptionParser(usage, version="%prog " + __version__)
p.add_option("-p", "--profile", dest='profile',
type='choice', choices=profile_list,
help=("Tile cutting profile (%s) - default 'mercator' "
"(Google Maps compatible)" % ",".join(profile_list)))
p.add_option("-r", "--resampling", dest="resampling",
type='choice', choices=resampling_list,
help="Resampling method (%s) - default 'average'" % ",".join(resampling_list))
p.add_option('-s', '--s_srs', dest="s_srs", metavar="SRS",
help="The spatial reference system used for the source input data")
p.add_option('-z', '--zoom', dest="zoom",
help="Zoom levels to render (format:'2-5' or '10').")
p.add_option('-e', '--resume', dest="resume", action="store_true",
help="Resume mode. Generate only missing files.")
p.add_option('-a', '--srcnodata', dest="srcnodata", metavar="NODATA",
help="NODATA transparency value to assign to the input data")
p.add_option('-d', '--tmscompatible', dest="tmscompatible", action="store_true",
help=("When using the geodetic profile, specifies the base resolution "
"as 0.703125 or 2 tiles at zoom level 0."))
p.add_option("-v", "--verbose",
action="store_true", dest="verbose",
help="Print status messages to stdout")
p.add_option("-x", "--exclude",
action="store_true", dest="exclude_transparent",
help="Exclude transparent tiles from result tileset")
p.add_option("-q", "--quiet",
action="store_true", dest="quiet",
help="Disable messages and status to stdout")
p.add_option("--processes",
dest="nb_processes",
type='int',
help="Number of processes to use for tiling")
# KML options
g = OptionGroup(p, "KML (Google Earth) options",
"Options for generated Google Earth SuperOverlay metadata")
g.add_option("-k", "--force-kml", dest='kml', action="store_true",
help=("Generate KML for Google Earth - default for 'geodetic' profile and "
"'raster' in EPSG:4326. For a dataset with different projection use "
"with caution!"))
g.add_option("-n", "--no-kml", dest='kml', action="store_false",
help="Avoid automatic generation of KML files for EPSG:4326")
g.add_option("-u", "--url", dest='url',
help="URL address where the generated tiles are going to be published")
p.add_option_group(g)
# HTML options
g = OptionGroup(p, "Web viewer options",
"Options for generated HTML viewers a la Google Maps")
g.add_option("-w", "--webviewer", dest='webviewer', type='choice', choices=webviewer_list,
help="Web viewer to generate (%s) - default 'all'" % ",".join(webviewer_list))
g.add_option("-t", "--title", dest='title',
help="Title of the map")
g.add_option("-c", "--copyright", dest='copyright',
help="Copyright for the map")
g.add_option("-g", "--googlekey", dest='googlekey',
help="Google Maps API key from http://code.google.com/apis/maps/signup.html")
g.add_option("-b", "--bingkey", dest='bingkey',
help="Bing Maps API key from https://www.bingmapsportal.com/")
p.add_option_group(g)
p.set_defaults(verbose=False, profile="mercator", kml=False, url='',
webviewer='all', copyright='', resampling='average', resume=False,
googlekey='INSERT_YOUR_KEY_HERE', bingkey='INSERT_YOUR_KEY_HERE',
nb_processes=1)
return p
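# Illustrative sketch, not part of the original gdal2tiles module: the parser
# built by optparse_init() can be exercised directly with a synthetic argv; the
# input file and output folder names below are hypothetical.
def _example_parse_cli():
    parser = optparse_init()
    options, args = parser.parse_args(
        ['-p', 'mercator', '-z', '2-5', '--processes', '2', 'input.tif', 'tiles'])
    # options.profile == 'mercator', options.zoom == '2-5', options.nb_processes == 2
    return options, args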
def process_args(argv):
parser = optparse_init()
options, args = parser.parse_args(args=argv)
# Args should be either an input file OR an input file and an output folder
if not args:
exit_with_error("You need to specify at least an input file as argument to the script")
if len(args) > 2:
exit_with_error("Processing of several input files is not supported.",
"Please first use a tool like gdal_vrtmerge.py or gdal_merge.py on the "
"files: gdal_vrtmerge.py -o merged.vrt %s" % " ".join(args))
input_file = args[0]
if not os.path.isfile(input_file):
exit_with_error("The provided input file %s does not exist or is not a file" % input_file)
if len(args) == 2:
output_folder = args[1]
else:
# Directory with input filename without extension in actual directory
output_folder = os.path.splitext(os.path.basename(input_file))[0]
options = options_post_processing(options, input_file, output_folder)
return input_file, output_folder, options
def options_post_processing(options, input_file, output_folder):
if not options.title:
options.title = os.path.basename(input_file)
if options.url and not options.url.endswith('/'):
options.url += '/'
if options.url:
out_path = output_folder
if out_path.endswith("/"):
out_path = out_path[:-1]
options.url += os.path.basename(out_path) + '/'
# Supported options
if options.resampling == 'antialias' and not numpy_available:
exit_with_error("'antialias' resampling algorithm is not available.",
"Install PIL (Python Imaging Library) and numpy.")
try:
os.path.basename(input_file).encode('ascii')
except UnicodeEncodeError:
full_ascii = False
else:
full_ascii = True
# LC_CTYPE check
if not full_ascii and 'UTF-8' not in os.environ.get("LC_CTYPE", ""):
if not options.quiet:
print("\nWARNING: "
"You are running gdal2tiles.py with a LC_CTYPE environment variable that is "
"not UTF-8 compatible, and your input file contains non-ascii characters. "
"The generated sample googlemaps, openlayers or "
"leaflet files might contain some invalid characters as a result\n")
# Output the results
if options.verbose:
print("Options:", options)
print("Input:", input_file)
print("Output:", output_folder)
print("Cache: %s MB" % (gdal.GetCacheMax() / 1024 / 1024))
print('')
return options
class TileDetail(object):
tx = 0
ty = 0
tz = 0
rx = 0
ry = 0
rxsize = 0
rysize = 0
wx = 0
wy = 0
wxsize = 0
wysize = 0
querysize = 0
def __init__(self, **kwargs):
for key in kwargs:
if hasattr(self, key):
setattr(self, key, kwargs[key])
def __unicode__(self):
return "TileDetail %s\n%s\n%s\n" % (self.tx, self.ty, self.tz)
def __str__(self):
return "TileDetail %s\n%s\n%s\n" % (self.tx, self.ty, self.tz)
def __repr__(self):
return "TileDetail %s\n%s\n%s\n" % (self.tx, self.ty, self.tz)
class TileJobInfo(object):
"""
Plain object to hold tile job configuration for a dataset
"""
src_file = ""
nb_data_bands = 0
output_file_path = ""
tile_extension = ""
tile_size = 0
tile_driver = None
kml = False
tminmax = []
tminz = 0
tmaxz = 0
in_srs_wkt = 0
out_geo_trans = []
ominy = 0
is_epsg_4326 = False
options = None
exclude_transparent = False
def __init__(self, **kwargs):
for key in kwargs:
if hasattr(self, key):
setattr(self, key, kwargs[key])
def __unicode__(self):
return "TileJobInfo %s\n" % (self.src_file)
def __str__(self):
return "TileJobInfo %s\n" % (self.src_file)
def __repr__(self):
return "TileJobInfo %s\n" % (self.src_file)
class Gdal2TilesError(Exception):
pass
class GDAL2Tiles(object):
def __init__(self, input_file, output_folder, options):
"""Constructor function - initialization"""
self.out_drv = None
self.mem_drv = None
self.warped_input_dataset = None
self.out_srs = None
self.nativezoom = None
self.tminmax = None
self.tsize = None
self.mercator = None
self.geodetic = None
self.alphaband = None
self.dataBandsCount = None
self.out_gt = None
self.tileswne = None
self.swne = None
self.ominx = None
self.omaxx = None
self.omaxy = None
self.ominy = None
self.input_file = None
self.output_folder = None
self.isepsg4326 = None
self.in_srs_wkt = None
# Tile format
self.tile_size = 256
self.tiledriver = 'PNG'
self.tileext = 'png'
self.tmp_dir = tempfile.mkdtemp()
self.tmp_vrt_filename = os.path.join(self.tmp_dir, str(uuid4()) + '.vrt')
# Should we read a bigger window of the input raster and scale it down?
# Note: Modified later by open_input()
# Not for 'near' resampling
# Not for Wavelet based drivers (JPEG2000, ECW, MrSID)
# Not for 'raster' profile
self.scaledquery = True
# How big should the query window be for scaling down?
# Reset later according to the chosen resampling algorithm
self.querysize = 4 * self.tile_size
# Should we use Read on the input file for generating overview tiles?
# Note: Modified later by open_input()
# Otherwise the overview tiles are generated from existing underlying tiles
self.overviewquery = False
self.input_file = input_file
self.output_folder = output_folder
self.options = options
if self.options.resampling == 'near':
self.querysize = self.tile_size
elif self.options.resampling == 'bilinear':
self.querysize = self.tile_size * 2
# User specified zoom levels
self.tminz = None
self.tmaxz = None
if self.options.zoom:
minmax = self.options.zoom.split('-', 1)
minmax.extend([''])
zoom_min, zoom_max = minmax[:2]
self.tminz = int(zoom_min)
if zoom_max:
self.tmaxz = int(zoom_max)
else:
self.tmaxz = int(zoom_min)
# KML generation
self.kml = self.options.kml
# -------------------------------------------------------------------------
def open_input(self):
"""Initialization of the input raster, reprojection if necessary"""
gdal.AllRegister()
self.out_drv = gdal.GetDriverByName(self.tiledriver)
self.mem_drv = gdal.GetDriverByName('MEM')
if not self.out_drv:
raise Exception("The '%s' driver was not found, is it available in this GDAL build?" %
self.tiledriver)
if not self.mem_drv:
raise Exception("The 'MEM' driver was not found, is it available in this GDAL build?")
# Open the input file
if self.input_file:
input_dataset = gdal.Open(self.input_file, gdal.GA_ReadOnly)
else:
raise Exception("No input file was specified")
if self.options.verbose:
print("Input file:",
"( %sP x %sL - %s bands)" % (input_dataset.RasterXSize,
input_dataset.RasterYSize,
input_dataset.RasterCount))
if not input_dataset:
# Note: GDAL prints the ERROR message too
exit_with_error("It is not possible to open the input file '%s'." % self.input_file)
# Read metadata from the input file
if input_dataset.RasterCount == 0:
exit_with_error("Input file '%s' has no raster band" % self.input_file)
if input_dataset.GetRasterBand(1).GetRasterColorTable():
exit_with_error(
"Please convert this file to RGB/RGBA and run gdal2tiles on the result.",
"From paletted file you can create RGBA file (temp.vrt) by:\n"
"gdal_translate -of vrt -expand rgba %s temp.vrt\n"
"then run:\n"
"gdal2tiles temp.vrt" % self.input_file
)
in_nodata = setup_no_data_values(input_dataset, self.options)
if self.options.verbose:
print("Preprocessed file:",
"( %sP x %sL - %s bands)" % (input_dataset.RasterXSize,
input_dataset.RasterYSize,
input_dataset.RasterCount))
in_srs, self.in_srs_wkt = setup_input_srs(input_dataset, self.options)
self.out_srs = setup_output_srs(in_srs, self.options)
# If input and output reference systems are different, we reproject the input dataset into
# the output reference system for easier manipulation
self.warped_input_dataset = None
if self.options.profile in ('mercator', 'geodetic'):
if not in_srs:
exit_with_error(
"Input file has unknown SRS.",
"Use --s_srs ESPG:xyz (or similar) to provide source reference system.")
if not has_georeference(input_dataset):
exit_with_error(
"There is no georeference - neither affine transformation (worldfile) "
"nor GCPs. You can generate only 'raster' profile tiles.",
"Either gdal2tiles with parameter -p 'raster' or use another GIS "
"software for georeference e.g. gdal_transform -gcp / -a_ullr / -a_srs"
)
if ((in_srs.ExportToProj4() != self.out_srs.ExportToProj4()) or
(input_dataset.GetGCPCount() != 0)):
self.warped_input_dataset = reproject_dataset(
input_dataset, in_srs, self.out_srs)
if in_nodata:
self.warped_input_dataset = update_no_data_values(
self.warped_input_dataset, in_nodata, options=self.options)
else:
self.warped_input_dataset = update_alpha_value_for_non_alpha_inputs(
self.warped_input_dataset, options=self.options)
if self.warped_input_dataset and self.options.verbose:
print("Projected file:", "tiles.vrt", "( %sP x %sL - %s bands)" % (
self.warped_input_dataset.RasterXSize,
self.warped_input_dataset.RasterYSize,
self.warped_input_dataset.RasterCount))
if not self.warped_input_dataset:
self.warped_input_dataset = input_dataset
self.warped_input_dataset.GetDriver().CreateCopy(self.tmp_vrt_filename,
self.warped_input_dataset)
# Get alpha band (either directly or from NODATA value)
self.alphaband = self.warped_input_dataset.GetRasterBand(1).GetMaskBand()
self.dataBandsCount = nb_data_bands(self.warped_input_dataset)
# KML test
self.isepsg4326 = False
srs4326 = osr.SpatialReference()
srs4326.ImportFromEPSG(4326)
srs4326.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
if self.out_srs and srs4326.ExportToProj4() == self.out_srs.ExportToProj4():
self.kml = True
self.isepsg4326 = True
if self.options.verbose:
print("KML autotest OK!")
# Read the georeference
self.out_gt = self.warped_input_dataset.GetGeoTransform()
# Test the size of the pixel
# Report error in case rotation/skew is in geotransform (possible only in 'raster' profile)
if (self.out_gt[2], self.out_gt[4]) != (0, 0):
exit_with_error("Georeference of the raster contains rotation or skew. "
"Such raster is not supported. Please use gdalwarp first.")
# Here we expect: pixel is square, no rotation on the raster
# Output Bounds - coordinates in the output SRS
self.ominx = self.out_gt[0]
self.omaxx = self.out_gt[0] + self.warped_input_dataset.RasterXSize * self.out_gt[1]
self.omaxy = self.out_gt[3]
self.ominy = self.out_gt[3] - self.warped_input_dataset.RasterYSize * self.out_gt[1]
# Note: maybe round(x, 14) to avoid the gdal_translate behaviour, when 0 becomes -1e-15
if self.options.verbose:
print("Bounds (output srs):", round(self.ominx, 13), self.ominy, self.omaxx, self.omaxy)
# Calculating ranges for tiles in different zoom levels
if self.options.profile == 'mercator':
self.mercator = GlobalMercator()
# Function which generates SWNE in LatLong for given tile
self.tileswne = self.mercator.TileLatLonBounds
# Generate table with min max tile coordinates for all zoomlevels
self.tminmax = list(range(0, 32))
for tz in range(0, 32):
tminx, tminy = self.mercator.MetersToTile(self.ominx, self.ominy, tz)
tmaxx, tmaxy = self.mercator.MetersToTile(self.omaxx, self.omaxy, tz)
# crop tiles extending world limits (+-180,+-90)
tminx, tminy = max(0, tminx), max(0, tminy)
tmaxx, tmaxy = min(2**tz - 1, tmaxx), min(2**tz - 1, tmaxy)
self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)
# TODO: Maps crossing 180E (Alaska?)
# Get the minimal zoom level (map covers area equivalent to one tile)
if self.tminz is None:
self.tminz = self.mercator.ZoomForPixelSize(
self.out_gt[1] *
max(self.warped_input_dataset.RasterXSize,
self.warped_input_dataset.RasterYSize) /
float(self.tile_size))
# Get the maximal zoom level
# (closest zoom level matching the resolution of the raster)
if self.tmaxz is None:
self.tmaxz = self.mercator.ZoomForPixelSize(self.out_gt[1])
if self.options.verbose:
print("Bounds (latlong):",
self.mercator.MetersToLatLon(self.ominx, self.ominy),
self.mercator.MetersToLatLon(self.omaxx, self.omaxy))
print('MinZoomLevel:', self.tminz)
print("MaxZoomLevel:",
self.tmaxz,
"(",
self.mercator.Resolution(self.tmaxz),
")")
if self.options.profile == 'geodetic':
self.geodetic = GlobalGeodetic(self.options.tmscompatible)
# Function which generates SWNE in LatLong for given tile
self.tileswne = self.geodetic.TileLatLonBounds
# Generate table with min max tile coordinates for all zoomlevels
self.tminmax = list(range(0, 32))
for tz in range(0, 32):
tminx, tminy = self.geodetic.LonLatToTile(self.ominx, self.ominy, tz)
tmaxx, tmaxy = self.geodetic.LonLatToTile(self.omaxx, self.omaxy, tz)
# crop tiles extending world limits (+-180,+-90)
tminx, tminy = max(0, tminx), max(0, tminy)
tmaxx, tmaxy = min(2**(tz + 1) - 1, tmaxx), min(2**tz - 1, tmaxy)
self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)
# TODO: Maps crossing 180E (Alaska?)
# Get the minimal zoom level (map covers area equivalent to one tile)
if self.tminz is None:
self.tminz = self.geodetic.ZoomForPixelSize(
self.out_gt[1] *
max(self.warped_input_dataset.RasterXSize,
self.warped_input_dataset.RasterYSize) /
float(self.tile_size))
# Get the maximal zoom level
# (closest zoom level matching the resolution of the raster)
if self.tmaxz is None:
self.tmaxz = self.geodetic.ZoomForPixelSize(self.out_gt[1])
if self.options.verbose:
print("Bounds (latlong):", self.ominx, self.ominy, self.omaxx, self.omaxy)
if self.options.profile == 'raster':
def log2(x):
return math.log10(x) / math.log10(2)
self.nativezoom = int(
max(math.ceil(log2(self.warped_input_dataset.RasterXSize / float(self.tile_size))),
math.ceil(log2(self.warped_input_dataset.RasterYSize / float(self.tile_size)))))
if self.options.verbose:
print("Native zoom of the raster:", self.nativezoom)
# Get the minimal zoom level (whole raster in one tile)
if self.tminz is None:
self.tminz = 0
# Get the maximal zoom level (native resolution of the raster)
if self.tmaxz is None:
self.tmaxz = self.nativezoom
# Generate table with min max tile coordinates for all zoomlevels
self.tminmax = list(range(0, self.tmaxz + 1))
self.tsize = list(range(0, self.tmaxz + 1))
for tz in range(0, self.tmaxz + 1):
tsize = 2.0**(self.nativezoom - tz) * self.tile_size
tminx, tminy = 0, 0
tmaxx = int(math.ceil(self.warped_input_dataset.RasterXSize / tsize)) - 1
tmaxy = int(math.ceil(self.warped_input_dataset.RasterYSize / tsize)) - 1
self.tsize[tz] = math.ceil(tsize)
self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)
# Function which generates SWNE in LatLong for given tile
if self.kml and self.in_srs_wkt:
ct = osr.CoordinateTransformation(in_srs, srs4326)
def rastertileswne(x, y, z):
pixelsizex = (2**(self.tmaxz - z) * self.out_gt[1]) # X-pixel size in level
west = self.out_gt[0] + x * self.tile_size * pixelsizex
east = west + self.tile_size * pixelsizex
south = self.ominy + y * self.tile_size * pixelsizex
north = south + self.tile_size * pixelsizex
if not self.isepsg4326:
# Transformation to EPSG:4326 (WGS84 datum)
west, south = ct.TransformPoint(west, south)[:2]
east, north = ct.TransformPoint(east, north)[:2]
return south, west, north, east
self.tileswne = rastertileswne
else:
self.tileswne = lambda x, y, z: (0, 0, 0, 0) # noqa
def generate_metadata(self):
"""
Generation of main metadata files and HTML viewers (metadata related to particular
tiles are generated during the tile processing).
"""
if not os.path.exists(self.output_folder):
os.makedirs(self.output_folder)
if self.options.profile == 'mercator':
south, west = self.mercator.MetersToLatLon(self.ominx, self.ominy)
north, east = self.mercator.MetersToLatLon(self.omaxx, self.omaxy)
south, west = max(-85.05112878, south), max(-180.0, west)
north, east = min(85.05112878, north), min(180.0, east)
self.swne = (south, west, north, east)
# Generate googlemaps.html
if self.options.webviewer in ('all', 'google') and self.options.profile == 'mercator':
if (not self.options.resume or not
os.path.exists(os.path.join(self.output_folder, 'googlemaps.html'))):
with open(os.path.join(self.output_folder, 'googlemaps.html'), 'wb') as f:
f.write(self.generate_googlemaps().encode('utf-8'))
# Generate openlayers.html
if self.options.webviewer in ('all', 'openlayers'):
if (not self.options.resume or not
os.path.exists(os.path.join(self.output_folder, 'openlayers.html'))):
with open(os.path.join(self.output_folder, 'openlayers.html'), 'wb') as f:
f.write(self.generate_openlayers().encode('utf-8'))
# Generate leaflet.html
if self.options.webviewer in ('all', 'leaflet'):
if (not self.options.resume or not
os.path.exists(os.path.join(self.output_folder, 'leaflet.html'))):
with open(os.path.join(self.output_folder, 'leaflet.html'), 'wb') as f:
f.write(self.generate_leaflet().encode('utf-8'))
elif self.options.profile == 'geodetic':
west, south = self.ominx, self.ominy
east, north = self.omaxx, self.omaxy
south, west = max(-90.0, south), max(-180.0, west)
north, east = min(90.0, north), min(180.0, east)
self.swne = (south, west, north, east)
# Generate openlayers.html
if self.options.webviewer in ('all', 'openlayers'):
if (not self.options.resume or not
os.path.exists(os.path.join(self.output_folder, 'openlayers.html'))):
with open(os.path.join(self.output_folder, 'openlayers.html'), 'wb') as f:
f.write(self.generate_openlayers().encode('utf-8'))
elif self.options.profile == 'raster':
west, south = self.ominx, self.ominy
east, north = self.omaxx, self.omaxy
self.swne = (south, west, north, east)
# Generate openlayers.html
if self.options.webviewer in ('all', 'openlayers'):
if (not self.options.resume or not
os.path.exists(os.path.join(self.output_folder, 'openlayers.html'))):
with open(os.path.join(self.output_folder, 'openlayers.html'), 'wb') as f:
f.write(self.generate_openlayers().encode('utf-8'))
# Generate tilemapresource.xml.
if not self.options.resume or not os.path.exists(os.path.join(self.output_folder, 'tilemapresource.xml')):
with open(os.path.join(self.output_folder, 'tilemapresource.xml'), 'wb') as f:
f.write(self.generate_tilemapresource().encode('utf-8'))
if self.kml:
# TODO: Maybe problem for not automatically generated tminz
# The root KML should contain links to all tiles in the tminz level
children = []
xmin, ymin, xmax, ymax = self.tminmax[self.tminz]
for x in range(xmin, xmax + 1):
for y in range(ymin, ymax + 1):
children.append([x, y, self.tminz])
# Generate Root KML
if self.kml:
if (not self.options.resume or not
os.path.exists(os.path.join(self.output_folder, 'doc.kml'))):
with open(os.path.join(self.output_folder, 'doc.kml'), 'wb') as f:
f.write(generate_kml(
None, None, None, self.tileext, self.tile_size, self.tileswne,
self.options, children
).encode('utf-8'))
def generate_base_tiles(self):
"""
Generation of the base tiles (the lowest in the pyramid) directly from the input raster
"""
if not self.options.quiet:
print("Generating Base Tiles:")
if self.options.verbose:
print('')
print("Tiles generated from the max zoom level:")
print("----------------------------------------")
print('')
# Set the bounds
tminx, tminy, tmaxx, tmaxy = self.tminmax[self.tmaxz]
ds = self.warped_input_dataset
tilebands = self.dataBandsCount + 1
querysize = self.querysize
if self.options.verbose:
print("dataBandsCount: ", self.dataBandsCount)
print("tilebands: ", tilebands)
tcount = (1 + abs(tmaxx - tminx)) * (1 + abs(tmaxy - tminy))
ti = 0
tile_details = []
tz = self.tmaxz
for ty in range(tmaxy, tminy - 1, -1):
for tx in range(tminx, tmaxx + 1):
ti += 1
tilefilename = os.path.join(
self.output_folder, str(tz), str(tx), "%s.%s" % (ty, self.tileext))
if self.options.verbose:
print(ti, '/', tcount, tilefilename)
if self.options.resume and os.path.exists(tilefilename):
if self.options.verbose:
print("Tile generation skipped because of --resume")
continue
# Create directories for the tile
if not os.path.exists(os.path.dirname(tilefilename)):
os.makedirs(os.path.dirname(tilefilename))
if self.options.profile == 'mercator':
# Tile bounds in EPSG:3857
b = self.mercator.TileBounds(tx, ty, tz)
elif self.options.profile == 'geodetic':
b = self.geodetic.TileBounds(tx, ty, tz)
# Don't scale up by nearest neighbour; instead change the querysize
# to the native resolution (and return a smaller query tile) for scaling
if self.options.profile in ('mercator', 'geodetic'):
rb, wb = self.geo_query(ds, b[0], b[3], b[2], b[1])
# Pixel size in the raster covering query geo extent
nativesize = wb[0] + wb[2]
if self.options.verbose:
print("\tNative Extent (querysize", nativesize, "): ", rb, wb)
# Tile bounds in raster coordinates for ReadRaster query
rb, wb = self.geo_query(ds, b[0], b[3], b[2], b[1], querysize=querysize)
rx, ry, rxsize, rysize = rb
wx, wy, wxsize, wysize = wb
else: # 'raster' profile:
tsize = int(self.tsize[tz]) # tile_size in raster coordinates for actual zoom
xsize = self.warped_input_dataset.RasterXSize # size of the raster in pixels
ysize = self.warped_input_dataset.RasterYSize
if tz >= self.nativezoom:
querysize = self.tile_size
rx = (tx) * tsize
rxsize = 0
if tx == tmaxx:
rxsize = xsize % tsize
if rxsize == 0:
rxsize = tsize
rysize = 0
if ty == tmaxy:
rysize = ysize % tsize
if rysize == 0:
rysize = tsize
ry = ysize - (ty * tsize) - rysize
wx, wy = 0, 0
wxsize = int(rxsize / float(tsize) * self.tile_size)
wysize = int(rysize / float(tsize) * self.tile_size)
if wysize != self.tile_size:
wy = self.tile_size - wysize
# Read the source raster if anything falls inside the tile, as per the computed
# geo_query
tile_details.append(
TileDetail(
tx=tx, ty=ty, tz=tz, rx=rx, ry=ry, rxsize=rxsize, rysize=rysize, wx=wx,
wy=wy, wxsize=wxsize, wysize=wysize, querysize=querysize,
)
)
conf = TileJobInfo(
src_file=self.tmp_vrt_filename,
nb_data_bands=self.dataBandsCount,
output_file_path=self.output_folder,
tile_extension=self.tileext,
tile_driver=self.tiledriver,
tile_size=self.tile_size,
kml=self.kml,
tminmax=self.tminmax,
tminz=self.tminz,
tmaxz=self.tmaxz,
in_srs_wkt=self.in_srs_wkt,
out_geo_trans=self.out_gt,
ominy=self.ominy,
is_epsg_4326=self.isepsg4326,
options=self.options,
exclude_transparent=self.options.exclude_transparent,
)
return conf, tile_details
def geo_query(self, ds, ulx, uly, lrx, lry, querysize=0):
"""
For a given dataset and a query in cartographic coordinates, returns the parameters for
ReadRaster() in raster coordinates together with the x/y shifts (for border tiles). If
querysize is not given, the extent is returned in the native resolution of the dataset ds.
Raises Gdal2TilesError if the dataset does not contain anything inside this geo_query.
"""
geotran = ds.GetGeoTransform()
rx = int((ulx - geotran[0]) / geotran[1] + 0.001)
ry = int((uly - geotran[3]) / geotran[5] + 0.001)
rxsize = int((lrx - ulx) / geotran[1] + 0.5)
rysize = int((lry - uly) / geotran[5] + 0.5)
if not querysize:
wxsize, wysize = rxsize, rysize
else:
wxsize, wysize = querysize, querysize
# Coordinates should not go out of the bounds of the raster
wx = 0
if rx < 0:
rxshift = abs(rx)
wx = int(wxsize * (float(rxshift) / rxsize))
wxsize = wxsize - wx
rxsize = rxsize - int(rxsize * (float(rxshift) / rxsize))
rx = 0
if rx + rxsize > ds.RasterXSize:
wxsize = int(wxsize * (float(ds.RasterXSize - rx) / rxsize))
rxsize = ds.RasterXSize - rx
wy = 0
if ry < 0:
ryshift = abs(ry)
wy = int(wysize * (float(ryshift) / rysize))
wysize = wysize - wy
rysize = rysize - int(rysize * (float(ryshift) / rysize))
ry = 0
if ry + rysize > ds.RasterYSize:
wysize = int(wysize * (float(ds.RasterYSize - ry) / rysize))
rysize = ds.RasterYSize - ry
return (rx, ry, rxsize, rysize), (wx, wy, wxsize, wysize)
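# Worked example for geo_query(), with hypothetical values (not part of the
# original module): for a north-up dataset with geotransform
# (0.0, 1.0, 0.0, 1000.0, 0.0, -1.0) and a size of 1000x1000 px, the query
# ulx=100, uly=900, lrx=356, lry=644 with querysize=256 yields
# rb = (100, 100, 256, 256), the ReadRaster() window in raster pixels, and
# wb = (0, 0, 256, 256), the write offset/size inside the query buffer.
# The wx/wy shifts only become non-zero when the query leaves the raster extent.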
def generate_tilemapresource(self):
"""
Template for tilemapresource.xml. Returns filled string. Expected variables:
title, north, south, east, west, isepsg4326, projection, publishurl,
zoompixels, tile_size, tileformat, profile
"""
args = {}
args['title'] = self.options.title
args['south'], args['west'], args['north'], args['east'] = self.swne
args['tile_size'] = self.tile_size
args['tileformat'] = self.tileext
args['publishurl'] = self.options.url
args['profile'] = self.options.profile
if self.options.profile == 'mercator':
args['srs'] = "EPSG:3857"
elif self.options.profile == 'geodetic':
args['srs'] = "EPSG:4326"
elif self.options.s_srs:
args['srs'] = self.options.s_srs
elif self.out_srs:
args['srs'] = self.out_srs.ExportToWkt()
else:
args['srs'] = ""
s = """<?xml version="1.0" encoding="utf-8"?>
<TileMap version="1.0.0" tilemapservice="http://tms.osgeo.org/1.0.0">
<Title>%(title)s</Title>
<Abstract></Abstract>
<SRS>%(srs)s</SRS>
<BoundingBox minx="%(west).14f" miny="%(south).14f" maxx="%(east).14f" maxy="%(north).14f"/>
<Origin x="%(west).14f" y="%(south).14f"/>
<TileFormat width="%(tile_size)d" height="%(tile_size)d" mime-type="image/%(tileformat)s" extension="%(tileformat)s"/>
<TileSets profile="%(profile)s">
""" % args # noqa
for z in range(self.tminz, self.tmaxz + 1):
if self.options.profile == 'raster':
s += """ <TileSet href="%s%d" units-per-pixel="%.14f" order="%d"/>\n""" % (
args['publishurl'], z, (2**(self.nativezoom - z) * self.out_gt[1]), z)
elif self.options.profile == 'mercator':
s += """ <TileSet href="%s%d" units-per-pixel="%.14f" order="%d"/>\n""" % (
args['publishurl'], z, 156543.0339 / 2**z, z)
elif self.options.profile == 'geodetic':
s += """ <TileSet href="%s%d" units-per-pixel="%.14f" order="%d"/>\n""" % (
args['publishurl'], z, 0.703125 / 2**z, z)
s += """ </TileSets>
</TileMap>
"""
return s
def generate_googlemaps(self):
"""
Template for googlemaps.html implementing Overlay of tiles for 'mercator' profile.
It returns filled string. Expected variables:
title, googlemapskey, north, south, east, west, minzoom, maxzoom, tile_size, tileformat,
publishurl
"""
args = {}
args['title'] = self.options.title
args['googlemapskey'] = self.options.googlekey
args['south'], args['west'], args['north'], args['east'] = self.swne
args['minzoom'] = self.tminz
args['maxzoom'] = self.tmaxz
args['tile_size'] = self.tile_size
args['tileformat'] = self.tileext
args['publishurl'] = self.options.url
args['copyright'] = self.options.copyright
s = r"""<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xmlns:v="urn:schemas-microsoft-com:vml">
<head>
<title>%(title)s</title>
<meta http-equiv="content-type" content="text/html; charset=utf-8"/>
<meta http-equiv='imagetoolbar' content='no'/>
<style type="text/css"> v\:* {behavior:url(#default#VML);}
html, body { overflow: hidden; padding: 0; height: 100%%; width: 100%%; font-family: 'Lucida Grande',Geneva,Arial,Verdana,sans-serif; }
body { margin: 10px; background: #fff; }
h1 { margin: 0; padding: 6px; border:0; font-size: 20pt; }
#header { height: 43px; padding: 0; background-color: #eee; border: 1px solid #888; }
#subheader { height: 12px; text-align: right; font-size: 10px; color: #555;}
#map { height: 95%%; border: 1px solid #888; }
</style>
<script src='http://maps.google.com/maps?file=api&v=2&key=%(googlemapskey)s'></script>
<script>
//<![CDATA[
/*
* Constants for given map
* TODO: read it from tilemapresource.xml
*/
var mapBounds = new GLatLngBounds(new GLatLng(%(south)s, %(west)s), new GLatLng(%(north)s, %(east)s));
var mapMinZoom = %(minzoom)s;
var mapMaxZoom = %(maxzoom)s;
var opacity = 0.75;
var map;
var hybridOverlay;
/*
* Create a Custom Opacity GControl
* http://www.maptiler.org/google-maps-overlay-opacity-control/
*/
var CTransparencyLENGTH = 58;
// maximum width that the knob can move (slide width minus knob width)
function CTransparencyControl( overlay ) {
this.overlay = overlay;
this.opacity = overlay.getTileLayer().getOpacity();
}
CTransparencyControl.prototype = new GControl();
// This function positions the slider to match the specified opacity
CTransparencyControl.prototype.setSlider = function(pos) {
var left = Math.round((CTransparencyLENGTH*pos));
this.slide.left = left;
this.knob.style.left = left+"px";
this.knob.style.top = "0px";
}
// This function reads the slider and sets the overlay opacity level
CTransparencyControl.prototype.setOpacity = function() {
// set the global variable
opacity = this.slide.left/CTransparencyLENGTH;
this.map.clearOverlays();
this.map.addOverlay(this.overlay, { zPriority: 0 });
if (this.map.getCurrentMapType() == G_HYBRID_MAP) {
this.map.addOverlay(hybridOverlay);
}
}
// This gets called by the API when addControl(new CTransparencyControl())
CTransparencyControl.prototype.initialize = function(map) {
var that=this;
this.map = map;
// Is this MSIE, if so we need to use AlphaImageLoader
var agent = navigator.userAgent.toLowerCase();
if ((agent.indexOf("msie") > -1) && (agent.indexOf("opera") < 1)){this.ie = true} else {this.ie = false}
// create the background graphic as a <div> containing an image
var container = document.createElement("div");
container.style.width="70px";
container.style.height="21px";
// Handle transparent PNG files in MSIE
if (this.ie) {
var loader = "filter:progid:DXImageTransform.Microsoft.AlphaImageLoader(src='http://www.maptiler.org/img/opacity-slider.png', sizingMethod='crop');";
container.innerHTML = '<div style="height:21px; width:70px; ' +loader+ '" ></div>';
} else {
container.innerHTML = '<div style="height:21px; width:70px; background-image: url(http://www.maptiler.org/img/opacity-slider.png)" ></div>';
}
// create the knob as a GDraggableObject
// Handle transparent PNG files in MSIE
if (this.ie) {
var loader = "progid:DXImageTransform.Microsoft.AlphaImageLoader(src='http://www.maptiler.org/img/opacity-slider.png', sizingMethod='crop');";
this.knob = document.createElement("div");
this.knob.style.height="21px";
this.knob.style.width="13px";
this.knob.style.overflow="hidden";
this.knob_img = document.createElement("div");
this.knob_img.style.height="21px";
this.knob_img.style.width="83px";
this.knob_img.style.filter=loader;
this.knob_img.style.position="relative";
this.knob_img.style.left="-70px";
this.knob.appendChild(this.knob_img);
} else {
this.knob = document.createElement("div");
this.knob.style.height="21px";
this.knob.style.width="13px";
this.knob.style.backgroundImage="url(http://www.maptiler.org/img/opacity-slider.png)";
this.knob.style.backgroundPosition="-70px 0px";
}
container.appendChild(this.knob);
this.slide=new GDraggableObject(this.knob, {container:container});
this.slide.setDraggableCursor('pointer');
this.slide.setDraggingCursor('pointer');
this.container = container;
// attach the control to the map
map.getContainer().appendChild(container);
// init slider
this.setSlider(this.opacity);
// Listen for the slider being moved and set the opacity
GEvent.addListener(this.slide, "dragend", function() {that.setOpacity()});
//GEvent.addListener(this.container, "click", function( x, y ) { alert(x, y) });
return container;
}
// Set the default position for the control
CTransparencyControl.prototype.getDefaultPosition = function() {
return new GControlPosition(G_ANCHOR_TOP_RIGHT, new GSize(7, 47));
}
/*
* Full-screen Window Resize
*/
function getWindowHeight() {
if (self.innerHeight) return self.innerHeight;
if (document.documentElement && document.documentElement.clientHeight)
return document.documentElement.clientHeight;
if (document.body) return document.body.clientHeight;
return 0;
}
function getWindowWidth() {
if (self.innerWidth) return self.innerWidth;
if (document.documentElement && document.documentElement.clientWidth)
return document.documentElement.clientWidth;
if (document.body) return document.body.clientWidth;
return 0;
}
function resize() {
var map = document.getElementById("map");
var header = document.getElementById("header");
var subheader = document.getElementById("subheader");
map.style.height = (getWindowHeight()-80) + "px";
map.style.width = (getWindowWidth()-20) + "px";
header.style.width = (getWindowWidth()-20) + "px";
subheader.style.width = (getWindowWidth()-20) + "px";
// map.checkResize();
}
/*
* Main load function:
*/
function load() {
if (GBrowserIsCompatible()) {
// Bug in the Google Maps: Copyright for Overlay is not correctly displayed
var gcr = GMapType.prototype.getCopyrights;
GMapType.prototype.getCopyrights = function(bounds,zoom) {
return ["%(copyright)s"].concat(gcr.call(this,bounds,zoom));
}
map = new GMap2( document.getElementById("map"), { backgroundColor: '#fff' } );
map.addMapType(G_PHYSICAL_MAP);
map.setMapType(G_PHYSICAL_MAP);
map.setCenter( mapBounds.getCenter(), map.getBoundsZoomLevel( mapBounds ));
hybridOverlay = new GTileLayerOverlay( G_HYBRID_MAP.getTileLayers()[1] );
GEvent.addListener(map, "maptypechanged", function() {
if (map.getCurrentMapType() == G_HYBRID_MAP) {
map.addOverlay(hybridOverlay);
} else {
map.removeOverlay(hybridOverlay);
}
} );
var tilelayer = new GTileLayer(GCopyrightCollection(''), mapMinZoom, mapMaxZoom);
var mercator = new GMercatorProjection(mapMaxZoom+1);
tilelayer.getTileUrl = function(tile,zoom) {
if ((zoom < mapMinZoom) || (zoom > mapMaxZoom)) {
return "http://www.maptiler.org/img/none.png";
}
var ymax = 1 << zoom;
var y = ymax - tile.y -1;
var tileBounds = new GLatLngBounds(
mercator.fromPixelToLatLng( new GPoint( (tile.x)*256, (tile.y+1)*256 ) , zoom ),
mercator.fromPixelToLatLng( new GPoint( (tile.x+1)*256, (tile.y)*256 ) , zoom )
);
if (mapBounds.intersects(tileBounds)) {
return zoom+"/"+tile.x+"/"+y+".png";
} else {
return "http://www.maptiler.org/img/none.png";
}
}
// IE 7-: support for PNG alpha channel
// Unfortunately, the opacity of the whole overlay is then not changeable.
tilelayer.isPng = function() { return true;};
tilelayer.getOpacity = function() { return opacity; }
overlay = new GTileLayerOverlay( tilelayer );
map.addOverlay(overlay);
map.addControl(new GLargeMapControl());
map.addControl(new GHierarchicalMapTypeControl());
map.addControl(new CTransparencyControl( overlay ));
""" % args # noqa
if self.kml:
s += """
map.addMapType(G_SATELLITE_3D_MAP);
map.getEarthInstance(getEarthInstanceCB);
"""
s += """
map.enableContinuousZoom();
map.enableScrollWheelZoom();
map.setMapType(G_HYBRID_MAP);
}
resize();
}
"""
if self.kml:
s += """
function getEarthInstanceCB(object) {
var ge = object;
if (ge) {
var url = document.location.toString();
url = url.substr(0,url.lastIndexOf('/'))+'/doc.kml';
var link = ge.createLink("");
if ("%(publishurl)s") { link.setHref("%(publishurl)s/doc.kml") }
else { link.setHref(url) };
var networkLink = ge.createNetworkLink("");
networkLink.setName("TMS Map Overlay");
networkLink.setFlyToView(true);
networkLink.setLink(link);
ge.getFeatures().appendChild(networkLink);
} else {
// alert("You should open a KML in Google Earth");
// add div with the link to generated KML... - maybe JavaScript redirect to the URL of KML?
}
}
""" % args # noqa
s += """
onresize=function(){ resize(); };
//]]>
</script>
</head>
<body onload="load()">
<div id="header"><h1>%(title)s</h1></div>
<div id="subheader">Generated by <a href="http://www.klokan.cz/projects/gdal2tiles/">GDAL2Tiles</a>, Copyright © 2008 <a href="http://www.klokan.cz/">Klokan Petr Pridal</a>, <a href="http://www.gdal.org/">GDAL</a> & <a href="http://www.osgeo.org/">OSGeo</a> <a href="http://code.google.com/soc/">GSoC</a>
<!-- PLEASE, LET THIS NOTE ABOUT AUTHOR AND PROJECT SOMEWHERE ON YOUR WEBSITE, OR AT LEAST IN THE COMMENT IN HTML. THANK YOU -->
</div>
<div id="map"></div>
</body>
</html>
""" % args # noqa
return s
def generate_leaflet(self):
"""
Template for leaflet.html implementing overlay of tiles for 'mercator' profile.
It returns the filled string. Expected variables:
title, north, south, east, west, minzoom, maxzoom, tile_size, tileformat, publishurl
"""
args = {}
args['title'] = self.options.title.replace('"', '\\"')
args['htmltitle'] = self.options.title
args['south'], args['west'], args['north'], args['east'] = self.swne
args['centerlat'] = (args['north'] + args['south']) / 2.
args['centerlon'] = (args['west'] + args['east']) / 2.
args['minzoom'] = self.tminz
args['maxzoom'] = self.tmaxz
args['beginzoom'] = self.tmaxz
args['tile_size'] = self.tile_size # not used
args['tileformat'] = self.tileext
args['publishurl'] = self.options.url # not used
args['copyright'] = self.options.copyright.replace('"', '\\"')
s = """<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name='viewport' content='width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no' />
<title>%(htmltitle)s</title>
<!-- Leaflet -->
<link rel="stylesheet" href="http://cdn.leafletjs.com/leaflet-0.7.5/leaflet.css" />
<script src="http://cdn.leafletjs.com/leaflet-0.7.5/leaflet.js"></script>
<style>
body { margin:0; padding:0; }
body, table, tr, td, th, div, h1, h2, input { font-family: "Calibri", "Trebuchet MS", "Ubuntu", Serif; font-size: 11pt; }
#map { position:absolute; top:0; bottom:0; width:100%%; } /* full size */
.ctl {
padding: 2px 10px 2px 10px;
background: white;
background: rgba(255,255,255,0.9);
box-shadow: 0 0 15px rgba(0,0,0,0.2);
border-radius: 5px;
text-align: right;
}
.title {
font-size: 18pt;
font-weight: bold;
}
.src {
font-size: 10pt;
}
</style>
</head>
<body>
<div id="map"></div>
<script>
/* **** Leaflet **** */
// Base layers
// .. OpenStreetMap
var osm = L.tileLayer('http://{s}.tile.osm.org/{z}/{x}/{y}.png', {attribution: '© <a href="http://osm.org/copyright">OpenStreetMap</a> contributors', minZoom: %(minzoom)s, maxZoom: %(maxzoom)s});
// .. CartoDB Positron
var cartodb = L.tileLayer('http://{s}.basemaps.cartocdn.com/light_all/{z}/{x}/{y}.png', {attribution: '© <a href="http://www.openstreetmap.org/copyright">OpenStreetMap</a> contributors, © <a href="http://cartodb.com/attributions">CartoDB</a>', minZoom: %(minzoom)s, maxZoom: %(maxzoom)s});
// .. OSM Toner
var toner = L.tileLayer('http://{s}.tile.stamen.com/toner/{z}/{x}/{y}.png', {attribution: 'Map tiles by <a href="http://stamen.com">Stamen Design</a>, under <a href="http://creativecommons.org/licenses/by/3.0">CC BY 3.0</a>. Data by <a href="http://openstreetmap.org">OpenStreetMap</a>, under <a href="http://www.openstreetmap.org/copyright">ODbL</a>.', minZoom: %(minzoom)s, maxZoom: %(maxzoom)s});
// .. White background
var white = L.tileLayer("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAQAAAAEAAQMAAABmvDolAAAAA1BMVEX///+nxBvIAAAAH0lEQVQYGe3BAQ0AAADCIPunfg43YAAAAAAAAAAA5wIhAAAB9aK9BAAAAABJRU5ErkJggg==", {minZoom: %(minzoom)s, maxZoom: %(maxzoom)s});
// Overlay layers (TMS)
var lyr = L.tileLayer('./{z}/{x}/{y}.%(tileformat)s', {tms: true, opacity: 0.7, attribution: "%(copyright)s", minZoom: %(minzoom)s, maxZoom: %(maxzoom)s});
// Map
var map = L.map('map', {
center: [%(centerlat)s, %(centerlon)s],
zoom: %(beginzoom)s,
minZoom: %(minzoom)s,
maxZoom: %(maxzoom)s,
layers: [osm]
});
var basemaps = {"OpenStreetMap": osm, "CartoDB Positron": cartodb, "Stamen Toner": toner, "Without background": white}
var overlaymaps = {"Layer": lyr}
// Title
var title = L.control();
title.onAdd = function(map) {
this._div = L.DomUtil.create('div', 'ctl title');
this.update();
return this._div;
};
title.update = function(props) {
this._div.innerHTML = "%(title)s";
};
title.addTo(map);
// Note
var src = 'Generated by <a href="http://www.klokan.cz/projects/gdal2tiles/">GDAL2Tiles</a>, Copyright © 2008 <a href="http://www.klokan.cz/">Klokan Petr Pridal</a>, <a href="http://www.gdal.org/">GDAL</a> & <a href="http://www.osgeo.org/">OSGeo</a> <a href="http://code.google.com/soc/">GSoC</a>';
var title = L.control({position: 'bottomleft'});
title.onAdd = function(map) {
this._div = L.DomUtil.create('div', 'ctl src');
this.update();
return this._div;
};
title.update = function(props) {
this._div.innerHTML = src;
};
title.addTo(map);
// Add base layers
L.control.layers(basemaps, overlaymaps, {collapsed: false}).addTo(map);
// Fit to overlay bounds (SW and NE points with (lat, lon))
map.fitBounds([[%(south)s, %(east)s], [%(north)s, %(west)s]]);
</script>
</body>
</html>
""" % args # noqa
return s
def generate_openlayers(self):
"""
Template for openlayers.html implementing overlay of available Spherical Mercator layers.
It returns the filled string. Expected variables:
title, bingkey, north, south, east, west, minzoom, maxzoom, tile_size, tileformat, publishurl
"""
args = {}
args['title'] = self.options.title
args['bingkey'] = self.options.bingkey
args['south'], args['west'], args['north'], args['east'] = self.swne
args['minzoom'] = self.tminz
args['maxzoom'] = self.tmaxz
args['tile_size'] = self.tile_size
args['tileformat'] = self.tileext
args['publishurl'] = self.options.url
args['copyright'] = self.options.copyright
if self.options.tmscompatible:
args['tmsoffset'] = "-1"
else:
args['tmsoffset'] = ""
if self.options.profile == 'raster':
args['rasterzoomlevels'] = self.tmaxz + 1
args['rastermaxresolution'] = 2**(self.nativezoom) * self.out_gt[1]
s = r"""<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>%(title)s</title>
<meta http-equiv='imagetoolbar' content='no'/>
<style type="text/css"> v\:* {behavior:url(#default#VML);}
html, body { overflow: hidden; padding: 0; height: 100%%; width: 100%%; font-family: 'Lucida Grande',Geneva,Arial,Verdana,sans-serif; }
body { margin: 10px; background: #fff; }
h1 { margin: 0; padding: 6px; border:0; font-size: 20pt; }
#header { height: 43px; padding: 0; background-color: #eee; border: 1px solid #888; }
#subheader { height: 12px; text-align: right; font-size: 10px; color: #555;}
#map { height: 95%%; border: 1px solid #888; }
.olImageLoadError { display: none; }
.olControlLayerSwitcher .layersDiv { border-radius: 10px 0 0 10px; }
</style>""" % args # noqa
if self.options.profile == 'mercator':
s += """
<script src='http://maps.google.com/maps/api/js?sensor=false&v=3.7'></script>
""" % args
s += """
<script src="http://www.openlayers.org/api/2.12/OpenLayers.js"></script>
<script>
var map;
var mapBounds = new OpenLayers.Bounds( %(west)s, %(south)s, %(east)s, %(north)s);
var mapMinZoom = %(minzoom)s;
var mapMaxZoom = %(maxzoom)s;
var emptyTileURL = "http://www.maptiler.org/img/none.png";
OpenLayers.IMAGE_RELOAD_ATTEMPTS = 3;
function init(){""" % args
if self.options.profile == 'mercator':
s += """
var options = {
div: "map",
controls: [],
projection: "EPSG:3857",
displayProjection: new OpenLayers.Projection("EPSG:4326"),
numZoomLevels: 20
};
map = new OpenLayers.Map(options);
// Create Google Mercator layers
var gmap = new OpenLayers.Layer.Google("Google Streets",
{
type: google.maps.MapTypeId.ROADMAP,
sphericalMercator: true
});
var gsat = new OpenLayers.Layer.Google("Google Satellite",
{
type: google.maps.MapTypeId.SATELLITE,
sphericalMercator: true
});
var ghyb = new OpenLayers.Layer.Google("Google Hybrid",
{
type: google.maps.MapTypeId.HYBRID,
sphericalMercator: true
});
var gter = new OpenLayers.Layer.Google("Google Terrain",
{
type: google.maps.MapTypeId.TERRAIN,
sphericalMercator: true
});
// Create Bing layers
var broad = new OpenLayers.Layer.Bing({
name: "Bing Roads",
key: "%(bingkey)s",
type: "Road",
sphericalMercator: true
});
var baer = new OpenLayers.Layer.Bing({
name: "Bing Aerial",
key: "%(bingkey)s",
type: "Aerial",
sphericalMercator: true
});
var bhyb = new OpenLayers.Layer.Bing({
name: "Bing Hybrid",
key: "%(bingkey)s",
type: "AerialWithLabels",
sphericalMercator: true
});
// Create OSM layer
var osm = new OpenLayers.Layer.OSM("OpenStreetMap");
// create TMS Overlay layer
var tmsoverlay = new OpenLayers.Layer.TMS("TMS Overlay", "",
{
serviceVersion: '.',
layername: '.',
alpha: true,
type: '%(tileformat)s',
isBaseLayer: false,
getURL: getURL
});
if (OpenLayers.Util.alphaHack() == false) {
tmsoverlay.setOpacity(0.7);
}
map.addLayers([gmap, gsat, ghyb, gter,
broad, baer, bhyb,
osm, tmsoverlay]);
var switcherControl = new OpenLayers.Control.LayerSwitcher();
map.addControl(switcherControl);
switcherControl.maximizeControl();
map.zoomToExtent(mapBounds.transform(map.displayProjection, map.projection));
""" % args # noqa
elif self.options.profile == 'geodetic':
s += """
var options = {
div: "map",
controls: [],
projection: "EPSG:4326"
};
map = new OpenLayers.Map(options);
var wms = new OpenLayers.Layer.WMS("VMap0",
"http://tilecache.osgeo.org/wms-c/Basic.py?",
{
layers: 'basic',
format: 'image/png'
}
);
var tmsoverlay = new OpenLayers.Layer.TMS("TMS Overlay", "",
{
serviceVersion: '.',
layername: '.',
alpha: true,
type: '%(tileformat)s',
isBaseLayer: false,
getURL: getURL
});
if (OpenLayers.Util.alphaHack() == false) {
tmsoverlay.setOpacity(0.7);
}
map.addLayers([wms,tmsoverlay]);
var switcherControl = new OpenLayers.Control.LayerSwitcher();
map.addControl(switcherControl);
switcherControl.maximizeControl();
map.zoomToExtent(mapBounds);
""" % args # noqa
elif self.options.profile == 'raster':
s += """
var options = {
div: "map",
controls: [],
maxExtent: new OpenLayers.Bounds(%(west)s, %(south)s, %(east)s, %(north)s),
maxResolution: %(rastermaxresolution)f,
numZoomLevels: %(rasterzoomlevels)d
};
map = new OpenLayers.Map(options);
var layer = new OpenLayers.Layer.TMS("TMS Layer", "",
{
serviceVersion: '.',
layername: '.',
alpha: true,
type: '%(tileformat)s',
getURL: getURL
});
map.addLayer(layer);
map.zoomToExtent(mapBounds);
""" % args # noqa
s += """
map.addControls([new OpenLayers.Control.PanZoomBar(),
new OpenLayers.Control.Navigation(),
new OpenLayers.Control.MousePosition(),
new OpenLayers.Control.ArgParser(),
new OpenLayers.Control.Attribution()]);
}
""" % args
if self.options.profile == 'mercator':
s += """
function getURL(bounds) {
bounds = this.adjustBounds(bounds);
var res = this.getServerResolution();
var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tileSize.w));
var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h));
var z = this.getServerZoom();
if (this.map.baseLayer.CLASS_NAME === 'OpenLayers.Layer.Bing') {
z+=1;
}
var path = this.serviceVersion + "/" + this.layername + "/" + z + "/" + x + "/" + y + "." + this.type;
var url = this.url;
if (OpenLayers.Util.isArray(url)) {
url = this.selectUrl(path, url);
}
if (mapBounds.intersectsBounds(bounds) && (z >= mapMinZoom) && (z <= mapMaxZoom)) {
return url + path;
} else {
return emptyTileURL;
}
}
""" % args # noqa
elif self.options.profile == 'geodetic':
s += """
function getURL(bounds) {
bounds = this.adjustBounds(bounds);
var res = this.getServerResolution();
var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tileSize.w));
var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h));
var z = this.getServerZoom()%(tmsoffset)s;
var path = this.serviceVersion + "/" + this.layername + "/" + z + "/" + x + "/" + y + "." + this.type;
var url = this.url;
if (OpenLayers.Util.isArray(url)) {
url = this.selectUrl(path, url);
}
if (mapBounds.intersectsBounds(bounds) && (z >= mapMinZoom) && (z <= mapMaxZoom)) {
return url + path;
} else {
return emptyTileURL;
}
}
""" % args # noqa
elif self.options.profile == 'raster':
s += """
function getURL(bounds) {
bounds = this.adjustBounds(bounds);
var res = this.getServerResolution();
var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tileSize.w));
var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h));
var z = this.getServerZoom();
var path = this.serviceVersion + "/" + this.layername + "/" + z + "/" + x + "/" + y + "." + this.type;
var url = this.url;
if (OpenLayers.Util.isArray(url)) {
url = this.selectUrl(path, url);
}
if (mapBounds.intersectsBounds(bounds) && (z >= mapMinZoom) && (z <= mapMaxZoom)) {
return url + path;
} else {
return emptyTileURL;
}
}
""" % args # noqa
s += """
function getWindowHeight() {
if (self.innerHeight) return self.innerHeight;
if (document.documentElement && document.documentElement.clientHeight)
return document.documentElement.clientHeight;
if (document.body) return document.body.clientHeight;
return 0;
}
function getWindowWidth() {
if (self.innerWidth) return self.innerWidth;
if (document.documentElement && document.documentElement.clientWidth)
return document.documentElement.clientWidth;
if (document.body) return document.body.clientWidth;
return 0;
}
function resize() {
var map = document.getElementById("map");
var header = document.getElementById("header");
var subheader = document.getElementById("subheader");
map.style.height = (getWindowHeight()-80) + "px";
map.style.width = (getWindowWidth()-20) + "px";
header.style.width = (getWindowWidth()-20) + "px";
subheader.style.width = (getWindowWidth()-20) + "px";
if (map.updateSize) { map.updateSize(); };
}
onresize=function(){ resize(); };
</script>
</head>
<body onload="init()">
<div id="header"><h1>%(title)s</h1></div>
<div id="subheader">Generated by <a href="http://www.klokan.cz/projects/gdal2tiles/">GDAL2Tiles</a>, Copyright © 2008 <a href="http://www.klokan.cz/">Klokan Petr Pridal</a>, <a href="http://www.gdal.org/">GDAL</a> & <a href="http://www.osgeo.org/">OSGeo</a> <a href="http://code.google.com/soc/">GSoC</a>
<!-- PLEASE, LET THIS NOTE ABOUT AUTHOR AND PROJECT SOMEWHERE ON YOUR WEBSITE, OR AT LEAST IN THE COMMENT IN HTML. THANK YOU -->
</div>
<div id="map"></div>
<script type="text/javascript" >resize()</script>
</body>
</html>""" % args # noqa
return s
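# Worker entry point: builds a GDAL2Tiles instance for the given input,
# generates the metadata/viewer files and computes the list of base-tile jobs.
# When a multiprocessing Pipe end is supplied via send_pipe, the resulting
# (tile_job_info, tile_details) tuple is also sent back to the parent process.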
def worker_tile_details(input_file, output_folder, options, send_pipe=None):
try:
gdal2tiles = GDAL2Tiles(input_file, output_folder, options)
gdal2tiles.open_input()
gdal2tiles.generate_metadata()
tile_job_info, tile_details = gdal2tiles.generate_base_tiles()
return_data = (tile_job_info, tile_details)
if send_pipe:
send_pipe.send(return_data)
return return_data
except Exception as e:
print("worker_tile_details failed ", str(e))
def progress_printer_thread(queue, nb_jobs):
pb = ProgressBar(nb_jobs)
pb.start()
for _ in range(nb_jobs):
queue.get()
pb.log_progress()
queue.task_done()
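# Minimal text progress bar: prints "0", then a dot per 2.5%% step and the
# decade percentages, in the familiar GDAL "0...10...20..." console style.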
class ProgressBar(object):
def __init__(self, total_items):
self.total_items = total_items
self.nb_items_done = 0
self.current_progress = 0
self.STEP = 2.5
def start(self):
sys.stdout.write("0")
def log_progress(self, nb_items=1):
self.nb_items_done += nb_items
progress = float(self.nb_items_done) / self.total_items * 100
if progress >= self.current_progress + self.STEP:
done = False
while not done:
if self.current_progress + self.STEP <= progress:
self.current_progress += self.STEP
if self.current_progress % 10 == 0:
sys.stdout.write(str(int(self.current_progress)))
if self.current_progress == 100:
sys.stdout.write("\n")
else:
sys.stdout.write(".")
else:
done = True
sys.stdout.flush()
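# Returns a callable mapping tile coordinates (x, y, z) to geographic bounds
# (south, west, north, east) in EPSG:4326, chosen according to the profile:
# the GlobalMercator/GlobalGeodetic helpers for 'mercator' and 'geodetic', and
# a geotransform-based computation (optionally reprojected) for 'raster'.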
def get_tile_swne(tile_job_info, options):
if options.profile == 'mercator':
mercator = GlobalMercator()
tile_swne = mercator.TileLatLonBounds
elif options.profile == 'geodetic':
geodetic = GlobalGeodetic(options.tmscompatible)
tile_swne = geodetic.TileLatLonBounds
elif options.profile == 'raster':
srs4326 = osr.SpatialReference()
srs4326.ImportFromEPSG(4326)
srs4326.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
if tile_job_info.kml and tile_job_info.in_srs_wkt:
in_srs = osr.SpatialReference()
in_srs.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
in_srs.ImportFromWkt(tile_job_info.in_srs_wkt)
ct = osr.CoordinateTransformation(in_srs, srs4326)
def rastertileswne(x, y, z):
pixelsizex = (2 ** (tile_job_info.tmaxz - z) * tile_job_info.out_geo_trans[1])
west = tile_job_info.out_geo_trans[0] + x * tile_job_info.tile_size * pixelsizex
east = west + tile_job_info.tile_size * pixelsizex
south = tile_job_info.ominy + y * tile_job_info.tile_size * pixelsizex
north = south + tile_job_info.tile_size * pixelsizex
if not tile_job_info.is_epsg_4326:
# Transformation to EPSG:4326 (WGS84 datum)
west, south = ct.TransformPoint(west, south)[:2]
east, north = ct.TransformPoint(east, north)[:2]
return south, west, north, east
tile_swne = rastertileswne
else:
tile_swne = lambda x, y, z: (0, 0, 0, 0) # noqa
else:
tile_swne = lambda x, y, z: (0, 0, 0, 0) # noqa
return tile_swne
def single_threaded_tiling(input_file, output_folder, options):
"""
Keep a single-threaded version that stays clear of multiprocessing, for platforms that do not
support it.
"""
if options.verbose:
print("Begin tiles details calc")
conf, tile_details = worker_tile_details(input_file, output_folder, options)
if options.verbose:
print("Tiles details calc complete.")
if not options.verbose and not options.quiet:
progress_bar = ProgressBar(len(tile_details))
progress_bar.start()
for tile_detail in tile_details:
create_base_tile(conf, tile_detail)
if not options.verbose and not options.quiet:
progress_bar.log_progress()
if getattr(threadLocal, 'cached_ds', None):
del threadLocal.cached_ds
create_overview_tiles(conf, output_folder, options)
shutil.rmtree(os.path.dirname(conf.src_file))
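# Multi-process variant: the tile-detail calculation runs in a child process
# and is received over a Pipe, the base tiles are then rendered by a Pool of
# workers, and a Manager queue feeds a separate progress-printer process.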
def multi_threaded_tiling(input_file, output_folder, options):
nb_processes = options.nb_processes or 1
# Make sure that all processes do not consume more than GDAL_CACHEMAX
os.environ['GDAL_CACHEMAX'] = '%d' % int(gdal.GetCacheMax() / nb_processes)
(conf_receiver, conf_sender) = Pipe(False)
if options.verbose:
print("Begin tiles details calc")
p = Process(target=worker_tile_details,
args=[input_file, output_folder, options],
kwargs={"send_pipe": conf_sender})
p.start()
# Make sure to consume the queue before joining. If the payload is too big, it won't be put in
# one go in the queue and therefore the sending process will never finish, waiting for space in
# the queue to send data
conf, tile_details = conf_receiver.recv()
p.join()
if options.verbose:
print("Tiles details calc complete.")
# Have to create the Queue through a multiprocessing.Manager to get a Queue Proxy,
# otherwise you can't pass it as a param in the method invoked by the pool...
manager = Manager()
queue = manager.Queue()
pool = Pool(processes=nb_processes)
# TODO: gbataille - check the confs for which each element is an array... one useless level?
# TODO: gbataille - assign an ID to each job for print in verbose mode "ReadRaster Extent ..."
for tile_detail in tile_details:
pool.apply_async(create_base_tile, (conf, tile_detail), {"queue": queue})
if not options.verbose and not options.quiet:
p = Process(target=progress_printer_thread, args=[queue, len(tile_details)])
p.start()
pool.close()
pool.join() # Jobs finished
if not options.verbose and not options.quiet:
p.join() # Traces done
create_overview_tiles(conf, output_folder, options)
shutil.rmtree(os.path.dirname(conf.src_file))
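# Entry point: parse the GDAL-style command line via process_args() and
# dispatch to the single- or multi-process tiling path. A typical invocation
# (hedged example; the exact options accepted are defined by process_args
# elsewhere in this script) might be:
#   python gdal2tiles.py input.tif output_tiles/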
def main():
# TODO: gbataille - use mkdtemp to work in a temp directory
# TODO: gbataille - debug intermediate tiles.vrt not produced anymore?
# TODO: gbataille - Refactor generate overview tiles to not depend on self variables
argv = gdal.GeneralCmdLineProcessor(sys.argv)
input_file, output_folder, options = process_args(argv[1:])
nb_processes = options.nb_processes or 1
if nb_processes == 1:
single_threaded_tiling(input_file, output_folder, options)
else:
multi_threaded_tiling(input_file, output_folder, options)
if __name__ == '__main__':
main()
# vim: set tabstop=4 shiftwidth=4 expandtab:
|
__init__.py
|
# Copyright (c) 2021 Boris Posavec
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
bl_info = {
"name": "UV-Packer",
"description": "Automated, fast, accurate, free UV-Packing",
"blender": (2, 90, 0),
"version" : (1, 0, 0),
"category": "UV",
"author": "Boris Posavec",
"location": "UV Editing > Sidebar > UV-Packer",
"wiki_url": "https://doc.uv-packer.com",
"tracker_url": "https://discord.gg/r8jPCWk",
"support": "COMMUNITY",
}
import bpy
import bmesh
import os
import webbrowser
import subprocess
import time
import queue
import threading
import struct
from bpy.props import (StringProperty, BoolProperty, IntProperty, FloatProperty, FloatVectorProperty, EnumProperty, PointerProperty)
from bpy.types import (Panel, Menu, Operator, PropertyGroup, AddonPreferences)
class misc:
UV_PACKER_MAP_NAME = "UV-Packer"
def set_map_name(name):
# keep the current map name on the class attribute so get_map_name() always
# has a value, even if it is called before set_map_name()
misc.UV_PACKER_MAP_NAME = name
return
def get_map_name():
return misc.UV_PACKER_MAP_NAME
def add_uv_channel_to_objects(objects):
for obj in objects:
if obj.type != "MESH":
continue
found = False
for uv_layer in obj.data.uv_layers:
if uv_layer.name == misc.get_map_name():
found = True
continue
if found == False:
obj.data.uv_layers.new(name=misc.get_map_name())
obj.data.uv_layers.active = obj.data.uv_layers[misc.get_map_name()]
return
def remove_uv_channel_from_objects(objects, name):
for obj in objects:
if obj.type != "MESH":
continue
uvs = obj.data.uv_layers
if name in uvs:
uvs.remove(uvs[name])
return
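# Serializes one mesh for the external packer as little-endian binary data:
# [name length (uint32)][name bytes][vertex count (uint32)] followed by three
# float64 coordinates per vertex, then [face count (uint32)] and, per face,
# [loop count (uint32)] with vertex index (uint32), normal (3 x float64) and
# UV (2 x float64) per loop, terminated by a running face index (uint32).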
def gather_object_data(obj):
bm = bmesh.from_edit_mesh(obj.data)
bm.normal_update()
bm.verts.ensure_lookup_table()
bm.faces.ensure_lookup_table()
uv_layer = bm.loops.layers.uv.verify()
data = bytearray()
nameBytes = obj.name.encode()
data += (len(nameBytes)).to_bytes(4, byteorder="little")
data.extend(nameBytes)
data += (len(bm.verts)).to_bytes(4, byteorder="little")
for vert in bm.verts:
data += bytearray(struct.pack("<ddd", vert.co.x, vert.co.y, vert.co.z))
indexCount = 0
data += (len(bm.faces)).to_bytes(4, byteorder="little")
for i, face in enumerate(bm.faces):
data += (len(face.loops)).to_bytes(4, byteorder="little")
for loop in face.loops:
vert = loop.vert
data += (vert.index).to_bytes(4, byteorder="little")
data += bytearray(struct.pack("<ddd", vert.normal.x, vert.normal.y, vert.normal.z))
uv_coord = loop[uv_layer].uv
data += bytearray(struct.pack("<dd", uv_coord.x, uv_coord.y))
data += (indexCount).to_bytes(4, byteorder="little")
indexCount += 1
return data
def replace_object_data(obj, message, readPtr):
bm = bmesh.from_edit_mesh(obj.data)
bm.verts.ensure_lookup_table()
bm.faces.ensure_lookup_table()
uv_layer = bm.loops.layers.uv.verify()
faces = [f for f in bm.faces]
numResultVerts = struct.unpack_from("<I", message, readPtr)[0]
readPtr += 4
for face in faces:
for loop in face.loops:
x = struct.unpack_from("<d", message, readPtr)[0]
readPtr += 8
y = struct.unpack_from("<d", message, readPtr)[0]
readPtr += 8
loop[uv_layer].uv = [x, y]
bmesh.update_edit_mesh(obj.data, False, False)
return readPtr
class QueueMessage:
MESSAGE = 0
PROGRESS = 1
STATS = 2
COMPLETE = 3
class QueueMsgSeverity:
INFO = 0
WARNING = 1
ERROR = 2
def encodeOptions(options):
data = bytearray()
data += (options["PackMode"]).to_bytes(4, byteorder="little")
data += (options["Width"]).to_bytes(4, byteorder="little")
data += (options["Height"]).to_bytes(4, byteorder="little")
data += bytearray(struct.pack("<d", options["Padding"]))
data += bytearray(struct.pack("<?", options["Combine"]))
data += bytearray(struct.pack("<?", options["Rescale"]))
data += bytearray(struct.pack("<?", options["PreRotate"]))
data += bytearray(struct.pack("<?", options["FullRotation"]))
data += (options["Rotation"]).to_bytes(4, byteorder="little")
data += (options["TilesX"]).to_bytes(4, byteorder="little")
data += (options["TilesY"]).to_bytes(4, byteorder="little")
return data
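# Talks to the UV-Packer-Blender executable over stdin/stdout: writes a
# length-prefixed blob of [add-on version][packed options][object count]
# [per-object data], then reads length-prefixed messages whose first uint32 is
# the type (0 = success, 1 = progress, 2 = error) and finally unpacks the
# returned UVs and coverage statistics back into the Blender meshes.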
def data_exchange_thread(process, options, meshes, msg_queue):
numObjects = len(meshes)
if numObjects == 0:
msg_queue.put((misc.QueueMessage.MESSAGE, "No objects to pack.", misc.QueueMsgSeverity.ERROR))
return
msg_queue.put((misc.QueueMessage.MESSAGE, "Preparing geometry"))
binaryData = bytearray()
binaryData += (bl_info["version"][0]).to_bytes(4, byteorder="little")
binaryData += (bl_info["version"][1]).to_bytes(4, byteorder="little")
binaryData += (bl_info["version"][2]).to_bytes(4, byteorder="little")
binaryData += misc.encodeOptions(options)
binaryData += (numObjects).to_bytes(4, byteorder="little")
for object_idx, obj in enumerate(meshes):
binaryData += (object_idx).to_bytes(4, byteorder="little")
binaryData += misc.gather_object_data(obj)
sumBytes = len(binaryData)
binaryData = sumBytes.to_bytes(4, byteorder="little") + binaryData
msg_queue.put((misc.QueueMessage.MESSAGE, "Packing"))
try:
out_stream = process.stdin
out_stream.write(binaryData)
out_stream.flush()
message = ""
while True:
messageSize = struct.unpack("<I", process.stdout.read(4))[0]
message = process.stdout.read(messageSize)
readPtr = 0
messageType = struct.unpack_from("<I", message, readPtr)[0]
readPtr += 4
if messageType == 0: # success
break
elif messageType == 1: # progress
msg_queue.put((misc.QueueMessage.PROGRESS, struct.unpack_from("<d", message, readPtr)[0]))
elif messageType == 2: # error
msgSize = struct.unpack_from("<I", message, readPtr)[0]
readPtr += 4
msg = message[readPtr:readPtr+msgSize].decode()
msg_queue.put((misc.QueueMessage.MESSAGE, msg, misc.QueueMsgSeverity.ERROR))
return
else:
print("Error: unsupported message " + str(messageType))
numObjects = struct.unpack_from("<I", message, readPtr)[0]
readPtr += 4
for obj in range(0, numObjects):
objId = struct.unpack_from("<I", message, readPtr)[0]
readPtr += 4
nameSize = struct.unpack_from("<I", message, readPtr)[0]
readPtr += 4
objName = message[readPtr:readPtr+nameSize].decode()
readPtr += nameSize
readPtr = misc.replace_object_data(meshes[objId], message, readPtr)
coverage = struct.unpack_from("<d", message, readPtr)[0]
msg_queue.put((misc.QueueMessage.STATS, str(round(coverage, 2))))
msg_queue.put((misc.QueueMessage.MESSAGE, "Packing complete", misc.QueueMsgSeverity.WARNING))
except:
return
def get_meshes(objects):
return [obj for obj in objects if obj.type=="MESH"]
def get_unique_objects(objects):
unique_meshes = []
unique_objects = []
for obj in objects:
if obj.data in unique_meshes:
continue
unique_meshes.append(obj.data)
unique_objects.append(obj)
return unique_objects
def resolve_engine(engine_str):
if engine_str == "OP0":
return 0
elif engine_str == "OP1":
return 1
else:
return 0
def ShowMessageBox(message = "", title = "Message Box", icon = "INFO"):
def draw(self, context):
self.layout.label(text=message)
bpy.context.window_manager.popup_menu(draw, title = title, icon = icon)
return
class UVPackProperty(PropertyGroup):
uvp_combine: BoolProperty(name="Combine", description="Pack all selected objects in one UV Sheet", default = True)
uvp_width: IntProperty(name="w:", description="UV Sheet Width", default = 1024, min = 8)
uvp_height: IntProperty(name="h:", description="UV Sheet Height", default = 1024, min = 8)
uvp_padding: FloatProperty(name="Padding", description="UV Sheet Padding", default = 2.0, min = 0.0)
uvp_engine: EnumProperty(
name="Dropdown:",
description="Chose Packing method",
items=
[
("OP0", "Efficient", "Best compromise for speed and space usage"),
("OP1", "High Quality", "Slowest but maximal space usage"),
],
default="OP0"
)
uvp_rescale: BoolProperty(name="Rescale UV-Charts", description="Rescale UV-Charts", default = True)
uvp_prerotate: BoolProperty(name="Pre-Rotate", description="Pre-rotate UV-Charts", default = True)
uvp_rotate: EnumProperty(
name="Rotation:",
description="Choose Rotation",
items=
[
("0", "0", "None"),
("1", "90", "90 degrees"),
("2", "45", "45 degrees"),
("3", "23", "23 degrees")
],
default="1"
)
uvp_fullRotate: BoolProperty(name="Ø", description="Use full rotation", default = False)
uvp_tilesX: IntProperty(name="Tiles X:", description="UV Tile Columns", default = 1, min = 1)
uvp_tilesY: IntProperty(name="Tiles Y:", description="UV Tile Rows", default = 1, min = 1)
uvp_create_channel: BoolProperty(name="Create new map channel", description="Create new Map channel for UV-Packer to store the results into", default = False)
uvp_channel_name: StringProperty(name="UV Map", description="Set name for the created channel", default="UV-Packer")
uvp_stats: StringProperty(name="Stats", description="Stats", default="0.0% ¦ 0s")
class UVPackerPanel(bpy.types.Panel):
bl_label = "UV-Packer"
bl_idname = "UVP_PT_layout"
bl_category = "UV-Packer"
bl_space_type = "IMAGE_EDITOR"
bl_region_type = "UI"
@classmethod
def poll(self, context):
return context.object is not None
def draw(self, context):
layout = self.layout
scene = context.scene
packerProps = scene.UVPackerProps
obj = context.object
mesh = bpy.context.object.data
uv_map = mesh.uv_layers.active
row = layout.row()
row.scale_y = 3.0
row.operator("uvpackeroperator.packbtn", text="Pack")
row = layout.row(align=True)
row.prop(packerProps, "uvp_combine")
row = layout.row()
row.label(text="≡ UV Sheet:")
row.label(text=packerProps.uvp_stats)
row = layout.row(align=True)
row.scale_y = 1.5
row.operator("uvpackeroperator.sizebtn", text="512").size = 512
row.operator("uvpackeroperator.sizebtn", text="1k").size = 1024
row.operator("uvpackeroperator.sizebtn", text="2k").size = 2048
row.operator("uvpackeroperator.sizebtn", text="4k").size = 4096
row = layout.row(align=True)
row.alignment = "EXPAND"
row.prop(packerProps, "uvp_width")
row.prop(packerProps, "uvp_height")
layout.prop(packerProps, "uvp_padding")
layout.separator()
layout.label(text="≡ UV Packing Engine:")
layout.prop(packerProps, "uvp_engine", text="Type")
layout.prop(packerProps, "uvp_rescale")
layout.prop(packerProps, "uvp_prerotate")
row = layout.row(align=True)
row.scale_y = 1.5
row.prop(packerProps, "uvp_rotate", expand=True)
row.prop(packerProps, "uvp_fullRotate", toggle=True)
row = layout.row(align=True)
row.prop(packerProps, "uvp_tilesX")
row.prop(packerProps, "uvp_tilesY")
layout.separator()
layout.label(text="≡ UV Channel Controls:")
layout.prop(packerProps, "uvp_create_channel")
layout.prop(packerProps, "uvp_channel_name")
layout.operator("uvpackeroperator.clearmaptoolbtn", text="Remove Map From Selection")
layout.separator()
versionStr = "UV-Packer Version: %d.%d.%d" % bl_info["version"]
layout.label(text=versionStr, icon="SETTINGS")
row = layout.row()
row.scale_y = 1.5
row.operator("wm.url_open", text="UV-Packer Homepage", icon="HOME").url = "https://www.uv-packer.com"
row = layout.row()
row.scale_y = 1.5
row.operator("wm.url_open", text="Documentation" , icon="QUESTION").url = "https://doc.uv-packer.com/"
class UVPackerPackButtonOperator(Operator):
bl_idname = "uvpackeroperator.packbtn"
bl_label = "Pack"
bl_options = {"REGISTER", "UNDO"}
bl_description = "Pack selected UVs"
def update_status(self, msg, severity="INFO"):
self.report({severity}, msg)
def execute(self, context):
self.timer = time.time()
self.coverage = 0.0
packer_props = context.scene.UVPackerProps
packer_props.dbg_msg = ""
if len(bpy.context.selected_objects) == 0:
return {"FINISHED"}
options = {
"PackMode": misc.resolve_engine(packer_props.uvp_engine),
"Width": packer_props.uvp_width,
"Height": packer_props.uvp_height,
"Padding": packer_props.uvp_padding,
"Rescale": packer_props.uvp_rescale,
"PreRotate": packer_props.uvp_prerotate,
"Rotation": int(packer_props.uvp_rotate),
"FullRotation": packer_props.uvp_fullRotate,
"Combine": packer_props.uvp_combine,
"TilesX": packer_props.uvp_tilesX,
"TilesY": packer_props.uvp_tilesY
}
packerDir = os.path.dirname(os.path.realpath(__file__))
packerExe = packerDir + "\\UV-Packer-Blender.exe"
try:
self.process = subprocess.Popen([packerExe], stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=False)
except:
msgStr = "UV-Packer executable not found. Please copy UV-Packer-Blender.exe to: " + packerDir
self.update_status(msgStr, "ERROR")
return {"FINISHED"}
wm = context.window_manager
wm.modal_handler_add(self)
unique_objects = misc.get_unique_objects(context.selected_objects)
meshes = misc.get_meshes(unique_objects)
if packer_props.uvp_create_channel:
misc.set_map_name(packer_props.uvp_channel_name)
misc.add_uv_channel_to_objects(unique_objects)
bpy.ops.object.mode_set(mode = "EDIT")
self.msg_queue = queue.SimpleQueue()
self.packer_thread = threading.Thread(target=misc.data_exchange_thread, args=(self.process, options, meshes, self.msg_queue))
self.packer_thread.daemon = True
self.packer_thread.start()
return {"RUNNING_MODAL"}
def modal(self, context, event):
self.CheckUserCancel(event)
if self.CheckMessages():
context.scene.UVPackerProps.uvp_stats = f"{self.coverage}% ¦ {round(time.time() - self.timer, 2)}s"
bpy.ops.wm.redraw_timer(type='DRAW_WIN_SWAP', iterations=1)
return {"FINISHED"}
if not self.packer_thread.is_alive() and self.process.poll() is not None:
self.msg_queue.put((misc.QueueMessage.COMPLETE, 1))
return {"RUNNING_MODAL"}
def CheckUserCancel(self, event):
if event.type == "ESC":
self.process.terminate()
self.update_status("UV-Packer cancelled")
def CheckMessages(self):
while True:
try:
item = self.msg_queue.get_nowait()
except queue.Empty as ex:
break
if item[0] == misc.QueueMessage.PROGRESS:
progress_str = "Progress: %d %%" % (int(item[1] * 100.0))
self.update_status(progress_str)
elif item[0] == misc.QueueMessage.MESSAGE:
if (len(item) > 2):
if (item[2] == misc.QueueMsgSeverity.WARNING):
self.update_status(item[1], "WARNING")
elif (item[2] == misc.QueueMsgSeverity.ERROR):
self.update_status(item[1], "ERROR")
misc.ShowMessageBox(item[1], "Error", "ERROR")
else:
self.update_status(item[1], "INFO")
else:
self.update_status(item[1], "INFO")
elif item[0] == misc.QueueMessage.STATS:
self.coverage = item[1]
elif item[0] == misc.QueueMessage.COMPLETE:
return True
return False
class UVPackerSizeButtonOperator(Operator):
bl_idname = "uvpackeroperator.sizebtn"
bl_label = "Size"
bl_description = "UV Sheet dimension"
size: bpy.props.IntProperty()
def execute(self, context):
context.scene.UVPackerProps.uvp_width = self.size
context.scene.UVPackerProps.uvp_height = self.size
return {"FINISHED"}
class UVPackerRotationButtonOperator(Operator):
bl_idname = "uvpackeroperator.rotbtn"
bl_label = "Rotation"
rotation: bpy.props.IntProperty()
def execute(self, context):
context.scene.UVPackerProps.uvp_rotate = self.rotation
return {"FINISHED"}
class UVPackerFullRotationButtonOperator(Operator):
bl_idname = "uvpackeroperator.fullrotbtn"
bl_label = "Full Rotation"
def execute(self, context):
context.scene.UVPackerProps.uvp_fullRotate = not context.scene.UVPackerProps.uvp_fullRotate
return {"FINISHED"}
class UVPackerToolClearMapButtonOperator(Operator):
bl_idname = "uvpackeroperator.clearmaptoolbtn"
bl_label = "Remove UV map from selected"
bl_description = "Delete this UV Map from selected object(s)"
def execute(self, context):
name = context.scene.UVPackerProps.uvp_channel_name
misc.remove_uv_channel_from_objects(bpy.context.selected_objects, name)
return {"FINISHED"}
registered_classes = []
classes = (UVPackProperty, UVPackerPanel, UVPackerPackButtonOperator, UVPackerSizeButtonOperator,
UVPackerRotationButtonOperator, UVPackerFullRotationButtonOperator, UVPackerToolClearMapButtonOperator)
def register():
if bpy.app.version < (2, 83, 0):
return
from bpy.utils import register_class
for cls in classes:
bpy.utils.register_class(cls)
registered_classes.append(cls)
bpy.types.Scene.UVPackerProps = PointerProperty(type=UVPackProperty)
def unregister():
from bpy.utils import unregister_class
for cls in registered_classes:
bpy.utils.unregister_class(cls)
del bpy.types.Scene.UVPackerProps
if __name__ == "__main__":
register()
|
trader.py
|
# -*- coding: UTF-8 -*-
# **********************************************************************************#
# File: Trader.
# **********************************************************************************#
from utils.decorator import singleton
from .configs import *
uwsgi_tag = False
try:
from uwsgidecorators import thread, cron
uwsgi_tag = True
except:
from threading import Thread
def thread(func):
def decorator():
t = Thread(target=func)
t.start()
return decorator
def spool(func):
def wrapper(*args, **kw):
return func(*args, **kw)
return wrapper
def cron(a, b, c, d, e):
def decorator(func):
def wrapper(*args, **kw):
return func(*args, **kw)
return wrapper
return decorator
def timer(t):
def decorator(func):
def wrapper(*args, **kw):
return func(*args, **kw)
return wrapper
return decorator
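# Fallback decorators for running outside uWSGI: when uwsgidecorators is not
# importable, @thread becomes a plain threading.Thread launcher and @cron,
# @spool and @timer degrade to no-op pass-throughs, so the module still
# imports and the task functions stay callable.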
_parse_slots = (lambda x: (int(x.split(':')[0]), int(x.split(':')[1])))
futures_pre_slots = _parse_slots(futures_pre_trading_task_time)
futures_post_slots = _parse_slots(futures_post_trading_task_time)
@cron(futures_pre_slots[1], futures_pre_slots[0], -1, -1, -1)
def pre_trading_task():
"""
Pre trading day task: including preparing the history database and reloading the schema.
"""
pass
@cron(futures_post_slots[1], futures_post_slots[0], -1, -1, -1)
def post_trading_task():
"""
Post trading day task: including preparing the history database and reloading the schema.
"""
pass
@thread
def feedback_worker():
"""
Trades worker: accept trade records from the exchange.
"""
while feedback_worker_enable:
pass
pass
@thread
def database_worker():
"""
Database worker: synchronize the valid database from Redis to MongoDB.
"""
while database_worker_enable:
pass
pass
@singleton
class Trader(object):
def feed_orders(self, orders):
pass
def accept_trade(self):
pass
|
runners.py
|
# -*- coding: utf-8 -*-
import locale
import os
import re
from signal import SIGINT, SIGTERM
import struct
from subprocess import Popen, PIPE
import sys
import threading
import time
# Import some platform-specific things at top level so they can be mocked for
# tests.
try:
import pty
except ImportError:
pty = None
try:
import fcntl
except ImportError:
fcntl = None
try:
import termios
except ImportError:
termios = None
from .exceptions import Failure, ThreadException
from .platform import (
WINDOWS, pty_size, character_buffered, ready_for_reading, read_byte,
)
from .util import has_fileno, isatty, ExceptionHandlingThread
from .vendor import six
class Runner(object):
"""
Partially-abstract core command-running API.
This class is not usable by itself and must be subclassed, implementing a
number of methods such as `start`, `wait` and `returncode`. For a subclass
implementation example, see the source code for `.Local`.
"""
read_chunk_size = 1000
input_sleep = 0.01
def __init__(self, context):
"""
Create a new runner with a handle on some `.Context`.
:param context:
a `.Context` instance, used to transmit default options and provide
access to other contextualized information (e.g. a remote-oriented
`.Runner` might want a `.Context` subclass holding info about
hostnames and ports.)
.. note::
The `.Context` given to `.Runner` instances **must** contain
default config values for the `.Runner` class in question. At a
minimum, this means values for each of the default
`.Runner.run` keyword arguments such as ``echo`` and ``warn``.
:raises exceptions.ValueError:
if not all expected default values are found in ``context``.
"""
#: The `.Context` given to the same-named argument of `__init__`.
self.context = context
#: A `threading.Event` signaling program completion.
#:
#: Typically set after `wait` returns. Some IO mechanisms rely on this
#: to know when to exit an infinite read loop.
self.program_finished = threading.Event()
# I wish Sphinx would organize all class/instance attrs in the same
# place. If I don't do this here, it goes 'class vars -> __init__
# docstring -> instance vars' :( TODO: consider just merging class and
# __init__ docstrings, though that's annoying too.
#: How many bytes (at maximum) to read per iteration of stream reads.
self.read_chunk_size = self.__class__.read_chunk_size
# Ditto re: declaring this in 2 places for doc reasons.
#: How many seconds to sleep on each iteration of the stdin read loop
#: and other otherwise-fast loops.
self.input_sleep = self.__class__.input_sleep
#: Whether pty fallback warning has been emitted.
self.warned_about_pty_fallback = False
#: The trigger/response mapping for use by `respond`. Is filled in at
#: runtime by `run`.
self.responses = None
def run(self, command, **kwargs):
"""
Execute ``command``, returning an instance of `Result`.
.. note::
All kwargs will default to the values found in this instance's
`~.Runner.context` attribute, specifically in its configuration's
``run`` subtree (e.g. ``run.echo`` provides the default value for
the ``echo`` keyword, etc). The base default values are described
in the parameter list below.
:param str command: The shell command to execute.
:param str shell: Which shell binary to use. Default: ``/bin/bash``.
:param bool warn:
Whether to warn and continue, instead of raising `.Failure`, when
the executed command exits with a nonzero status. Default:
``False``.
:param hide:
Allows the caller to disable ``run``'s default behavior of copying
the subprocess' stdout and stderr to the controlling terminal.
Specify ``hide='out'`` (or ``'stdout'``) to hide only the stdout
stream, ``hide='err'`` (or ``'stderr'``) to hide only stderr, or
``hide='both'`` (or ``True``) to hide both streams.
The default value is ``None``, meaning to print everything;
``False`` will also disable hiding.
.. note::
Stdout and stderr are always captured and stored in the
``Result`` object, regardless of ``hide``'s value.
.. note::
``hide=True`` will also override ``echo=True`` if both are
given (either as kwargs or via config/CLI).
:param bool pty:
By default, ``run`` connects directly to the invoked process and
reads its stdout/stderr streams. Some programs will buffer (or even
behave) differently in this situation compared to using an actual
terminal or pseudoterminal (pty). To use a pty instead of the
default behavior, specify ``pty=True``.
.. warning::
Due to their nature, ptys have a single output stream, so the
ability to tell stdout apart from stderr is **not possible**
when ``pty=True``. As such, all output will appear on
``out_stream`` (see below) and be captured into the ``stdout``
result attribute. ``err_stream`` and ``stderr`` will always be
empty when ``pty=True``.
:param bool fallback:
Controls auto-fallback behavior re: problems offering a pty when
``pty=True``. Whether this has any effect depends on the specific
`Runner` subclass being invoked. Default: ``True``.
:param bool echo:
Controls whether `.run` prints the command string to local stdout
prior to executing it. Default: ``False``.
.. note::
``hide=True`` will override ``echo=True`` if both are given.
:param dict env:
By default, subprocesses receive a copy of Invoke's own environment
(i.e. ``os.environ``). Supply a dict here to update that child
environment.
For example, ``run('command', env={'PYTHONPATH':
'/some/virtual/env/maybe'})`` would modify the ``PYTHONPATH`` env
var, with the rest of the child's env looking identical to the
parent.
.. seealso:: ``replace_env`` for changing 'update' to 'replace'.
:param bool replace_env:
When ``True``, causes the subprocess to receive the dictionary
given to ``env`` as its entire shell environment, instead of
updating a copy of ``os.environ`` (which is the default behavior).
Default: ``False``.
:param str encoding:
Override auto-detection of which encoding the subprocess is using
for its stdout/stderr streams (which defaults to the return value
of `default_encoding`).
:param out_stream:
A file-like stream object to which the subprocess' standard output
should be written. If ``None`` (the default), ``sys.stdout`` will
be used.
:param err_stream:
Same as ``out_stream``, except for standard error, and defaulting
to ``sys.stderr``.
:param in_stream:
A file-like stream object to be used as the subprocess' standard
input. If ``None`` (the default), ``sys.stdin`` will be used.
:param dict responses:
A `dict` whose keys are regular expressions to be searched for in
the program's ``stdout`` or ``stderr``, and whose values may be any
value one desires to write into a stdin text/binary stream
(typically ``str`` or ``bytes`` objects depending on Python
version) in response.
See :doc:`/concepts/responses` for details on this functionality.
Default: ``{}``.
:param bool echo_stdin:
Whether to write data from ``in_stream`` back to ``out_stream``.
In other words, in normal interactive usage, this parameter
controls whether Invoke mirrors what you type back to your
terminal.
By default (when ``None``), this behavior is triggered by the
following:
* Not using a pty to run the subcommand (i.e. ``pty=False``),
as ptys natively echo stdin to stdout on their own;
* And when the controlling terminal of Invoke itself (as per
``in_stream``) appears to be a valid terminal device or TTY.
(Specifically, when `~invoke.util.isatty` yields a ``True``
result when given ``in_stream``.)
.. note::
This property tends to be ``False`` when piping another
program's output into an Invoke session, or when running
Invoke within another program (e.g. running Invoke from
itself).
If both of those properties are true, echoing will occur; if either
is false, no echoing will be performed.
When not ``None``, this parameter will override that auto-detection
and force, or disable, echoing.
:returns:
`Result`, or a subclass thereof.
:raises: `.Failure`, if the command exited nonzero & ``warn=False``.
:raises:
`.ThreadException` (if the background I/O threads encounter
exceptions).
:raises:
``KeyboardInterrupt``, if the user generates one during command
execution by pressing Ctrl-C.
.. note::
In normal usage, Invoke's top-level CLI tooling will catch
these & exit with return code ``130`` (typical POSIX behavior)
instead of printing a traceback and exiting ``1`` (which is
what Python normally does).
"""
# Normalize kwargs w/ config
opts, out_stream, err_stream, in_stream = self._run_opts(kwargs)
shell = opts['shell']
# Environment setup
env = self.generate_env(opts['env'], opts['replace_env'])
# Echo running command
if opts['echo']:
print("\033[1;37m{0}\033[0m".format(command))
# Start executing the actual command (runs in background)
self.start(command, shell, env)
# Arrive at final encoding if neither config nor kwargs had one
self.encoding = opts['encoding'] or self.default_encoding()
# Set up IO thread parameters (format - body_func: {kwargs})
stdout, stderr = [], []
thread_args = {
self.handle_stdout: {
'buffer_': stdout,
'hide': 'out' in opts['hide'],
'output': out_stream,
},
# TODO: make this & related functionality optional, for users who
# don't care about autoresponding & are encountering issues with
# the stdin mirroring? Downside is it fragments expected behavior &
# puts folks with true interactive use cases in a different support
# class.
self.handle_stdin: {
'input_': in_stream,
'output': out_stream,
'echo': opts['echo_stdin'],
}
}
if not self.using_pty:
thread_args[self.handle_stderr] = {
'buffer_': stderr,
'hide': 'err' in opts['hide'],
'output': err_stream,
}
# Kick off IO threads
self.threads, exceptions = [], []
for target, kwargs in six.iteritems(thread_args):
t = ExceptionHandlingThread(target=target, kwargs=kwargs)
self.threads.append(t)
t.start()
# Wait for completion, then tie things off & obtain result
# And make sure we perform that tying off even if things asplode.
exception = None
try:
self.wait()
except BaseException as e: # Make sure we nab ^C etc
exception = e
# TODO: consider consuming the KeyboardInterrupt instead of storing
# it for later raise; this would allow for subprocesses which don't
# actually exit on Ctrl-C (e.g. vim). NOTE: but this would make it
# harder to correctly detect it and exit 130 once everything wraps
# up...
# TODO: generally, but especially if we do ignore
# KeyboardInterrupt, honor other signals sent to our own process
# and transmit them to the subprocess before handling 'normally'.
# TODO: we should probably re-raise anything that's not
# KeyboardInterrupt? This is quite possibly swallowing things.
# NOTE: we handle this now instead of at actual-exception-handling
# time because otherwise the stdout/err reader threads may block
# until the subprocess exits.
if isinstance(exception, KeyboardInterrupt):
self.send_interrupt(exception)
self.program_finished.set()
for t in self.threads:
# NOTE: using a join timeout for corner case from #351 (one pipe
# excepts, fills up, prevents subproc from exiting, and other pipe
# then has a blocking read() call, causing its thread to block on
# join). In normal, non-#351 situations this should function
# similarly to a non-timeout'd join.
# NOTE: but we avoid a timeout for the stdin handler as it has its
# own termination conditions & isn't subject to this corner case.
timeout = None
if t.kwargs['target'] != self.handle_stdin:
# TODO: make the timeout configurable
timeout = 1
t.join(timeout)
e = t.exception()
if e is not None:
exceptions.append(e)
# If we got a main-thread exception while wait()ing, raise it now that
# we've closed our worker threads.
if exception is not None:
raise exception
# If any exceptions appeared inside the threads, raise them now as an
# aggregate exception object.
if exceptions:
raise ThreadException(exceptions)
stdout = ''.join(stdout)
stderr = ''.join(stderr)
if WINDOWS:
# "Universal newlines" - replace all standard forms of
# newline with \n. This is not technically Windows related
# (\r as newline is an old Mac convention) but we only apply
# the translation for Windows as that's the only platform
# it is likely to matter for these days.
stdout = stdout.replace("\r\n", "\n").replace("\r", "\n")
stderr = stderr.replace("\r\n", "\n").replace("\r", "\n")
# Get return/exit code
exited = self.returncode()
# Return, or raise as failure, our final result
result = self.generate_result(
command=command,
shell=shell,
env=env,
stdout=stdout,
stderr=stderr,
exited=exited,
pty=self.using_pty,
)
if not (result or opts['warn']):
raise Failure(result)
return result
def _run_opts(self, kwargs):
"""
Unify `run` kwargs with config options to arrive at local options.
:returns:
Four-tuple of ``(opts_dict, stdout_stream, stderr_stream,
stdin_stream)``.
"""
opts = {}
for key, value in six.iteritems(self.context.config.run):
runtime = kwargs.pop(key, None)
opts[key] = value if runtime is None else runtime
# TODO: handle invalid kwarg keys (anything left in kwargs)
# If hide was True, turn off echoing
if opts['hide'] is True:
opts['echo'] = False
# Then normalize 'hide' from one of the various valid input values,
# into a stream-names tuple.
opts['hide'] = normalize_hide(opts['hide'])
# Derive stream objects
out_stream = opts['out_stream']
if out_stream is None:
out_stream = sys.stdout
err_stream = opts['err_stream']
if err_stream is None:
err_stream = sys.stderr
in_stream = opts['in_stream']
if in_stream is None:
in_stream = sys.stdin
# Determine pty or no
self.using_pty = self.should_use_pty(opts['pty'], opts['fallback'])
# Responses
# TODO: precompile the keys into regex objects
self.responses = opts.get('responses', {})
return opts, out_stream, err_stream, in_stream
def generate_result(self, **kwargs):
"""
Create & return a suitable `Result` instance from the given ``kwargs``.
Subclasses may wish to override this in order to manipulate things or
generate a `Result` subclass (e.g. ones containing additional metadata
besides the default).
"""
return Result(**kwargs)
def read_proc_output(self, reader):
"""
Iteratively read & decode bytes from a subprocess' out/err stream.
:param reader:
A literal reader function/partial, wrapping the actual stream
object in question, which takes a number of bytes to read, and
returns that many bytes (or ``None``).
``reader`` should be a reference to either `read_proc_stdout` or
`read_proc_stderr`, which perform the actual, platform/library
specific read calls.
:returns:
A generator yielding Unicode strings (`unicode` on Python 2; `str`
on Python 3).
Specifically, each resulting string is the result of decoding
`read_chunk_size` bytes read from the subprocess' out/err stream.
"""
# NOTE: Typically, reading from any stdout/err (local, remote or
# otherwise) can be thought of as "read until you get nothing back".
# This is preferable over "wait until an out-of-band signal claims the
# process is done running" because sometimes that signal will appear
# before we've actually read all the data in the stream (i.e.: a race
# condition).
while True:
data = reader(self.read_chunk_size)
if not data:
break
yield self.decode(data)
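# Illustrative sketch (hypothetical, not used anywhere in this class): any
# callable that accepts a byte count and returns bytes (or a falsey value at
# EOF) can serve as ``reader``, e.g.:
#
#   import functools, io
#   fake = io.BytesIO(b"hello world")
#   chunks = list(runner.read_proc_output(functools.partial(fake.read)))
#   # -> decoded text chunks of up to runner.read_chunk_size bytes each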
def write_our_output(self, stream, string):
"""
Write ``string`` to ``stream``.
Also calls ``.flush()`` on ``stream`` to ensure that real terminal
streams don't buffer.
:param stream:
A file-like stream object, mapping to the ``out_stream`` or
``err_stream`` parameters of `run`.
:param string: A Unicode string object.
:returns: ``None``.
"""
# Encode under Python 2 only, because of the common problem where
# sys.stdout/err on Python 2 end up using sys.getdefaultencoding(),
# which is frequently NOT the same thing as the real local terminal
# encoding (reflected as sys.stdout.encoding). I.e. even when
# sys.stdout.encoding is UTF-8, ascii is still actually used, and
# explodes.
# Python 3 doesn't have this problem, so we delegate encoding to the
# io.*Writer classes involved.
if six.PY2:
# TODO: split up self.encoding, only use the one for 'local
# encoding' here.
string = string.encode(self.encoding)
stream.write(string)
stream.flush()
def _handle_output(self, buffer_, hide, output, reader, indices):
# TODO: store un-decoded/raw bytes somewhere as well...
for data in self.read_proc_output(reader):
# Echo to local stdout if necessary
# TODO: should we rephrase this as "if you want to hide, give me a
# dummy output stream, e.g. something like /dev/null"? Otherwise, a
# combo of 'hide=stdout' + 'here is an explicit out_stream' means
# out_stream is never written to, and that seems...odd.
if not hide:
self.write_our_output(stream=output, string=data)
# Store in shared buffer so main thread can do things with the
# result after execution completes.
# NOTE: this is threadsafe insofar as no reading occurs until after
# the thread is join()'d.
buffer_.append(data)
# Run our specific buffer & indices through the autoresponder
self.respond(buffer_, indices)
def handle_stdout(self, buffer_, hide, output):
"""
Read process' stdout, storing into a buffer & printing/parsing.
Intended for use as a thread target. Only terminates when all stdout
from the subprocess has been read.
:param list buffer_: The capture buffer shared with the main thread.
:param bool hide: Whether or not to replay data into ``output``.
:param output:
Output stream (file-like object) to write data into when not
hiding.
:returns: ``None``.
"""
self._handle_output(
buffer_,
hide,
output,
reader=self.read_proc_stdout,
indices=threading.local(),
)
def handle_stderr(self, buffer_, hide, output):
"""
Read process' stderr, storing into a buffer & printing/parsing.
Identical to `handle_stdout` except for the stream read from; see its
docstring for API details.
"""
self._handle_output(
buffer_,
hide,
output,
reader=self.read_proc_stderr,
indices=threading.local(),
)
def read_our_stdin(self, input_):
"""
Read & decode one byte from a local stdin stream.
:param input_:
Actual stream object to read from. Maps to ``in_stream`` in `run`,
so will often be ``sys.stdin``, but might be any stream-like
object.
:returns:
A Unicode string, the result of decoding the read byte (this might
be the empty string if the pipe has closed/reached EOF); or
``None`` if stdin wasn't ready for reading yet.
"""
# TODO: consider moving the character_buffered contextmanager call in
# here? Downside is it would be flipping those switches for every byte
# read instead of once per session, which could be costly (?).
byte = None
if ready_for_reading(input_):
byte = read_byte(input_)
# Decode if it appears to be binary-type. (From real terminal
# streams, usually yes; from file-like objects, often no.)
if byte and isinstance(byte, six.binary_type):
# TODO: will decoding 1 byte at a time break multibyte
# character encodings? How to square interactivity with that?
byte = self.decode(byte)
return byte
def handle_stdin(self, input_, output, echo):
"""
Read local stdin, copying into process' stdin as necessary.
Intended for use as a thread target.
.. note::
Because real terminal stdin streams have no well-defined "end", if
such a stream is detected (based on existence of a callable
``.fileno()``) this method will wait until `program_finished` is
set, before terminating.
When the stream doesn't appear to be from a terminal, the same
semantics as `handle_stdout` are used - the stream is simply
``read()`` from until it returns an empty value.
:param input_: Stream (file-like object) from which to read.
:param output: Stream (file-like object) to which echoing may occur.
:param bool echo: User override option for stdin-stdout echoing.
:returns: ``None``.
"""
with character_buffered(input_):
while True:
# Read 1 byte at a time for interactivity's sake.
char = self.read_our_stdin(input_)
if char:
# Mirror what we just read to process' stdin.
# We perform an encode so Python 3 gets bytes (streams +
# str's in Python 3 == no bueno) but skip the decode step,
# since there's presumably no need (nobody's interacting
# with this data programmatically).
self.write_proc_stdin(char)
# Also echo it back to local stdout (or whatever
# out_stream is set to) when necessary.
if echo is None:
echo = self.should_echo_stdin(input_, output)
if echo:
self.write_our_output(stream=output, string=char)
# Empty string/char/byte != None. Can't just use 'else' here.
elif char is not None:
# When reading from file-like objects that aren't "real"
# terminal streams, an empty byte signals EOF.
break
# Dual all-done signals: program being executed is done
# running, *and* we don't seem to be reading anything out of
# stdin. (NOTE: If we only test the former, we may encounter
# race conditions re: unread stdin.)
if self.program_finished.is_set() and not char:
break
# Take a nap so we're not chewing CPU.
time.sleep(self.input_sleep)
# while not self.program_finished.is_set():
# # TODO: reinstate lock/whatever thread logic from fab v1 which
# # prevents reading from stdin while other parts of the code are
# # prompting for runtime passwords? (search for 'input_enabled')
# if have_char and chan.input_enabled:
# # Send all local stdin to remote end's stdin
# #byte = msvcrt.getch() if WINDOWS else sys.stdin.read(1)
# yield self.encode(sys.stdin.read(1))
# # Optionally echo locally, if needed.
# # TODO: how to truly do this? access the out_stream which
# # isn't currently visible to us? if we just skip this part,
# # interactive users may not have their input echoed...ISTR we
# # used to assume remote would send it back down stdout/err...
# # clearly not?
# #if not using_pty and env.echo_stdin:
# # Not using fastprint() here -- it prints as 'user'
# # output level, don't want it to be accidentally hidden
# # sys.stdout.write(byte)
# # sys.stdout.flush()
def should_echo_stdin(self, input_, output):
"""
Determine whether data read from ``input_`` should echo to ``output``.
Used by `handle_stdin`; tests attributes of ``input_`` and ``output``.
:param input_: Input stream (file-like object).
:param output: Output stream (file-like object).
:returns: A ``bool``.
"""
return (not self.using_pty) and isatty(input_)
def respond(self, buffer_, indices):
"""
Write to the program's stdin in response to patterns in ``buffer_``.
The patterns and responses are driven by the key/value pairs in the
``responses`` kwarg of `run` - see its documentation for format
details, and :doc:`/concepts/responses` for a conceptual overview.
:param list buffer_:
The capture buffer for this thread's particular IO stream.
:param indices:
A `threading.local` object upon which is (or will be) stored the
last-seen index for each key in ``responses``. Allows the responder
functionality to be used by multiple threads (typically, one each
for stdout and stderr) without conflicting.
:returns: ``None``.
"""
# Short-circuit if there are no responses to respond to. This saves us
# the effort of joining the buffer and so forth.
if not self.responses:
return
# Join buffer contents into a single string; without this, we can't be
# sure that the pattern we seek isn't split up across chunks in the
# buffer.
# NOTE: using string.join should be "efficient enough" for now, re:
# speed and memory use. Should that turn up false, consider using
# StringIO or cStringIO (tho the latter doesn't do Unicode well?)
# which is apparently even more efficient.
stream = u''.join(buffer_)
# Initialize seek indices
if not hasattr(indices, 'seek'):
indices.seek = {}
for pattern in self.responses:
indices.seek[pattern] = 0
for pattern, response in six.iteritems(self.responses):
# Only look at stream contents we haven't seen yet, to avoid dupes.
new_ = stream[indices.seek[pattern]:]
# Search, across lines if necessary
matches = re.findall(pattern, new_, re.S)
# Update seek index if we've matched
if matches:
indices.seek[pattern] += len(new_)
# Iterate over findall() response in case >1 match occurred.
for _ in matches:
# TODO: automatically append system-appropriate newline if
# response doesn't end with it, w/ option to disable?
self.write_proc_stdin(response)
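# Illustrative sketch (an assumed typical call, not part of this module): the
# ``responses`` mapping pairs a regex pattern with the text to write back to
# the subprocess' stdin whenever that pattern shows up in captured output:
#
#   runner.run("sudo whoami", responses={r"password.*: ?": "s3cret\n"})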
def generate_env(self, env, replace_env):
"""
Return a suitable environment dict based on user input & behavior.
:param dict env: Dict supplying overrides or full env, depending.
:param bool replace_env:
Whether ``env`` updates, or is used in place of, the value of
`os.environ`.
:returns: A dictionary of shell environment vars.
"""
return env if replace_env else dict(os.environ, **env)
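# Illustrative note, derived directly from the method above:
#   generate_env({'FOO': 'bar'}, replace_env=False) -> copy of os.environ
#                                                      updated with FOO=bar
#   generate_env({'FOO': 'bar'}, replace_env=True)  -> {'FOO': 'bar'} only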
def should_use_pty(self, pty, fallback):
"""
Should execution attempt to use a pseudo-terminal?
:param bool pty:
Whether the user explicitly asked for a pty.
:param bool fallback:
Whether falling back to non-pty execution should be allowed, in
situations where ``pty=True`` but a pty could not be allocated.
"""
# NOTE: fallback not used: no falling back implemented by default.
return pty
@property
def has_dead_threads(self):
"""
Detect whether any IO threads appear to have terminated unexpectedly.
Used during process-completion waiting (in `wait`) to ensure we don't
deadlock our child process if our IO processing threads have
errored/died.
:returns:
``True`` if any threads appear to have terminated with an
exception, ``False`` otherwise.
"""
return any(x.is_dead for x in self.threads)
def wait(self):
"""
Block until the running command appears to have exited.
:returns: ``None``.
"""
while True:
proc_finished = self.process_is_finished
dead_threads = self.has_dead_threads
if proc_finished or dead_threads:
break
time.sleep(self.input_sleep)
def write_proc_stdin(self, data):
"""
Write encoded ``data`` to the running process' stdin.
:param data: A Unicode string.
:returns: ``None``.
"""
# Encode always, then request implementing subclass to perform the
# actual write to subprocess' stdin.
self._write_proc_stdin(data.encode(self.encoding))
def decode(self, data):
"""
Decode some ``data`` bytes, returning Unicode.
"""
# NOTE: yes, this is a 1-liner. The point is to make it much harder to
# forget to use 'replace' when decoding :)
return data.decode(self.encoding, 'replace')
@property
def process_is_finished(self):
"""
Determine whether our subprocess has terminated.
.. note::
The implementation of this method should be nonblocking, as it is
used within a query/poll loop.
:returns:
``True`` if the subprocess has finished running, ``False``
otherwise.
"""
raise NotImplementedError
def start(self, command, shell, env):
"""
Initiate execution of ``command`` (via ``shell``, with ``env``).
Typically this means use of a forked subprocess or requesting start of
execution on a remote system.
In most cases, this method will also set subclass-specific member
variables used in other methods such as `wait` and/or `returncode`.
"""
raise NotImplementedError
def read_proc_stdout(self, num_bytes):
"""
Read ``num_bytes`` from the running process' stdout stream.
:param int num_bytes: Number of bytes to read at maximum.
:returns: A string/bytes object.
"""
raise NotImplementedError
def read_proc_stderr(self, num_bytes):
"""
Read ``num_bytes`` from the running process' stderr stream.
:param int num_bytes: Number of bytes to read at maximum.
:returns: A string/bytes object.
"""
raise NotImplementedError
def _write_proc_stdin(self, data):
"""
Write ``data`` to running process' stdin.
This should never be called directly; it's for subclasses to implement.
See `write_proc_stdin` for the public API call.
:param data: Already-encoded byte data suitable for writing.
:returns: ``None``.
"""
raise NotImplementedError
def default_encoding(self):
"""
Return a string naming the expected encoding of subprocess streams.
This return value should be suitable for use by encode/decode methods.
"""
# TODO: probably wants to be 2 methods, one for local and one for
# subprocess. For now, good enough to assume both are the same.
#
# Based on some experiments there is an issue with
# `locale.getpreferredencoding(do_setlocale=False)` in Python 2.x on
# Linux and OS X, and `locale.getpreferredencoding(do_setlocale=True)`
# triggers some global state changes. (See #274 for discussion.)
encoding = locale.getpreferredencoding(False)
if six.PY2 and not WINDOWS:
default = locale.getdefaultlocale()[1]
if default is not None:
encoding = default
return encoding
def send_interrupt(self, interrupt):
"""
Submit an interrupt signal to the running subprocess.
:param interrupt:
The locally-sourced ``KeyboardInterrupt`` causing the method call.
:returns: ``None``.
"""
raise NotImplementedError
def returncode(self):
"""
Return the numeric return/exit code resulting from command execution.
"""
raise NotImplementedError
class Local(Runner):
"""
Execute a command on the local system in a subprocess.
.. note::
When Invoke itself is executed without a controlling terminal (e.g.
when ``sys.stdin`` lacks a useful ``fileno``), it's not possible to
present a handle on our PTY to local subprocesses. In such situations,
`Local` will fallback to behaving as if ``pty=False`` (on the theory
that degraded execution is better than none at all) as well as printing
a warning to stderr.
To disable this behavior, say ``fallback=False``.
"""
def __init__(self, context):
super(Local, self).__init__(context)
# Bookkeeping var for pty use case
self.status = None
def should_use_pty(self, pty=False, fallback=True):
use_pty = False
if pty:
use_pty = True
# TODO: pass in & test in_stream, not sys.stdin
if not has_fileno(sys.stdin) and fallback:
if not self.warned_about_pty_fallback:
sys.stderr.write("WARNING: stdin has no fileno; falling back to non-pty execution!\n") # noqa
self.warned_about_pty_fallback = True
use_pty = False
return use_pty
def read_proc_stdout(self, num_bytes):
# Obtain useful read-some-bytes function
if self.using_pty:
# Need to handle spurious OSErrors on some Linux platforms.
try:
data = os.read(self.parent_fd, num_bytes)
except OSError as e:
# Only eat this specific OSError so we don't hide others
if "Input/output error" not in str(e):
raise
# The bad OSErrors happen after all expected output has
# appeared, so we return a falsey value, which triggers the
# "end of output" logic in code using reader functions.
data = None
else:
data = os.read(self.process.stdout.fileno(), num_bytes)
return data
def read_proc_stderr(self, num_bytes):
# NOTE: when using a pty, this will never be called.
# TODO: do we ever get those OSErrors on stderr? Feels like we could?
return os.read(self.process.stderr.fileno(), num_bytes)
def _write_proc_stdin(self, data):
# NOTE: parent_fd from pty.fork() is a read/write pipe attached to our
# forked process' stdout/stdin, respectively.
fd = self.parent_fd if self.using_pty else self.process.stdin.fileno()
# Try to write, ignoring broken pipes if encountered (implies child
# process exited before the process piping stdin to us finished;
# there's nothing we can do about that!)
try:
return os.write(fd, data)
except OSError as e:
if 'Broken pipe' not in str(e):
raise
def start(self, command, shell, env):
if self.using_pty:
if pty is None: # Encountered ImportError
sys.exit("You indicated pty=True, but your platform doesn't support the 'pty' module!") # noqa
cols, rows = pty_size()
self.pid, self.parent_fd = pty.fork()
# If we're the child process, load up the actual command in a
# shell, just as subprocess does; this replaces our process - whose
# pipes are all hooked up to the PTY - with the "real" one.
if self.pid == 0:
# TODO: both pty.spawn() and pexpect.spawn() do a lot of
# setup/teardown involving tty.setraw, getrlimit, signal.
# Ostensibly we'll want some of that eventually, but if
# possible write tests - integration-level if necessary -
# before adding it!
#
# Set pty window size based on what our own controlling
# terminal's window size appears to be.
# TODO: make subroutine?
winsize = struct.pack('HHHH', rows, cols, 0, 0)
fcntl.ioctl(sys.stdout.fileno(), termios.TIOCSWINSZ, winsize)
# Use execve for bare-minimum "exec w/ variable # args + env"
# behavior. No need for the 'p' (use PATH to find executable)
# for now.
# TODO: see if subprocess is using equivalent of execvp...
os.execve(shell, [shell, '-c', command], env)
else:
self.process = Popen(
command,
shell=True,
executable=shell,
env=env,
stdout=PIPE,
stderr=PIPE,
stdin=PIPE,
)
@property
def process_is_finished(self):
if self.using_pty:
# NOTE:
# https://github.com/pexpect/ptyprocess/blob/4058faa05e2940662ab6da1330aa0586c6f9cd9c/ptyprocess/ptyprocess.py#L680-L687
# implies that Linux "requires" use of the blocking, non-WNOHANG
# version of this call. Our testing doesn't verify this, however,
# so...
# NOTE: It does appear to be totally blocking on Windows, so our
# issue #351 may be totally unsolvable there. Unclear.
pid_val, self.status = os.waitpid(self.pid, os.WNOHANG)
return pid_val != 0
else:
return self.process.poll() is not None
def send_interrupt(self, interrupt):
# NOTE: No need to reraise the interrupt since we have full control
# over the local process and can kill it.
if self.using_pty:
os.kill(self.pid, SIGINT)
else:
# Use send_signal with platform-appropriate signal (Windows doesn't
# support SIGINT unfortunately, only SIGTERM).
# NOTE: could use subprocess.terminate() (which is cross-platform)
# but feels best to use SIGINT as much as we possibly can as it's
# most appropriate. terminate() always sends SIGTERM.
# NOTE: in interactive POSIX terminals, this is technically
# unnecessary as Ctrl-C submits the INT to the entire foreground
# process group (which will be both Invoke and its spawned
# subprocess). However, it doesn't seem to hurt, & ensures that a
# *non-interactive* SIGINT is forwarded correctly.
self.process.send_signal(SIGINT if not WINDOWS else SIGTERM)
def returncode(self):
if self.using_pty:
return os.WEXITSTATUS(self.status)
else:
return self.process.returncode
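# Hedged usage sketch (assumes an invoke-style Context instance named ``ctx``
# is available; not part of this module):
#
#   result = Local(ctx).run("echo hello", hide=True, warn=True)
#   print(result.ok, result.stdout)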
class Result(object):
"""
A container for information about the result of a command execution.
See individual attribute/method documentation below for details.
.. note::
`Result` objects' truth evaluation is equivalent to their `.ok`
attribute's value. Therefore, quick-and-dirty expressions like the
following are possible::
if run("some shell command"):
do_something()
else:
handle_problem()
"""
# TODO: inherit from namedtuple instead? heh
def __init__(self, command, shell, env, stdout, stderr, exited, pty):
#: The command which was executed.
self.command = command
#: The shell binary used for execution.
self.shell = shell
#: The shell environment used for execution.
self.env = env
#: An integer representing the subprocess' exit/return code.
self.exited = exited
#: An alias for `.exited`.
self.return_code = exited
#: The subprocess' standard output, as a multiline string.
self.stdout = stdout
#: Same as `.stdout` but containing standard error (unless the process
#: was invoked via a pty; see `.Runner.run`.)
self.stderr = stderr
#: A boolean describing whether the subprocess was invoked with a pty
#: or not; see `.Runner.run`.
self.pty = pty
def __nonzero__(self):
# NOTE: This is the method that (under Python 2) determines Boolean
# behavior for objects.
return self.ok
def __bool__(self):
# NOTE: And this is the Python 3 equivalent of __nonzero__. Much better
# name...
return self.__nonzero__()
def __str__(self):
ret = ["Command exited with status {0}.".format(self.exited)]
for x in ('stdout', 'stderr'):
val = getattr(self, x)
ret.append(u"""=== {0} ===
{1}
""".format(x, val.rstrip()) if val else u"(no {0})".format(x))
return u"\n".join(ret)
@property
def ok(self):
"""
A boolean equivalent to ``exited == 0``.
"""
return self.exited == 0
@property
def failed(self):
"""
The inverse of ``ok``.
I.e., ``True`` if the program exited with a nonzero return code, and
``False`` otherwise.
"""
return not self.ok
def normalize_hide(val):
hide_vals = (None, False, 'out', 'stdout', 'err', 'stderr', 'both', True)
if val not in hide_vals:
err = "'hide' got {0!r} which is not in {1!r}"
raise ValueError(err.format(val, hide_vals))
if val in (None, False):
hide = ()
elif val in ('both', True):
hide = ('out', 'err')
elif val == 'stdout':
hide = ('out',)
elif val == 'stderr':
hide = ('err',)
else:
hide = (val,)
return hide
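# --- Hedged demo (added for illustration; not in the original module). It
# exercises normalize_hide() defined above and only runs when this file is
# executed directly, so importing the module is unaffected.
if __name__ == '__main__':
    for value in (None, False, 'out', 'stdout', 'err', 'stderr', 'both', True):
        print("{0!r} -> {1!r}".format(value, normalize_hide(value)))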
|
trezor.py
|
import traceback
import sys
from electrum.util import bfh, bh2u, versiontuple, UserCancelled, UserFacingException
from electrum.bitcoin import TYPE_ADDRESS, TYPE_SCRIPT
from electrum.bip32 import deserialize_xpub, convert_bip32_path_to_list_of_uint32 as parse_path
from electrum import constants
from electrum.i18n import _
from electrum.plugin import Device
from electrum.transaction import deserialize, Transaction
from electrum.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum.base_wizard import ScriptTypeNotSupported, HWD_SETUP_NEW_WALLET
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import (is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data,
LibraryFoundButUnusable)
try:
import trezorlib
import trezorlib.transport
from .clientbase import TrezorClientBase
from trezorlib.messages import (
RecoveryDeviceType, HDNodeType, HDNodePathType,
InputScriptType, OutputScriptType, MultisigRedeemScriptType,
TxInputType, TxOutputType, TxOutputBinType, TransactionType, SignTx)
RECOVERY_TYPE_SCRAMBLED_WORDS = RecoveryDeviceType.ScrambledWords
RECOVERY_TYPE_MATRIX = RecoveryDeviceType.Matrix
TREZORLIB = True
except Exception as e:
import traceback
traceback.print_exc()
TREZORLIB = False
RECOVERY_TYPE_SCRAMBLED_WORDS, RECOVERY_TYPE_MATRIX = range(2)
# Trezor initialization methods
TIM_NEW, TIM_RECOVER = range(2)
TREZOR_PRODUCT_KEY = 'Trezor'
class TrezorKeyStore(Hardware_KeyStore):
hw_type = 'trezor'
device = TREZOR_PRODUCT_KEY
def get_derivation(self):
return self.derivation
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
msg_sig = client.sign_message(address_path, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
if txin.get('prev_tx') is None and not Transaction.is_segwit_input(txin):
raise UserFacingException(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class TrezorPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, types
firmware_URL = 'https://wallet.trezor.io'
libraries_URL = 'https://github.com/trezor/python-trezor'
minimum_firmware = (1, 5, 2)
keystore_class = TrezorKeyStore
minimum_library = (0, 11, 0)
maximum_library = (0, 12)
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
DEVICE_IDS = (TREZOR_PRODUCT_KEY,)
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
super().__init__(parent, config, name)
self.libraries_available = self.check_libraries_available()
if not self.libraries_available:
return
self.device_manager().register_enumerate_func(self.enumerate)
def get_library_version(self):
import trezorlib
try:
version = trezorlib.__version__
except Exception:
version = 'unknown'
if TREZORLIB:
return version
else:
raise LibraryFoundButUnusable(library_version=version)
def enumerate(self):
devices = trezorlib.transport.enumerate_devices()
return [Device(path=d.get_path(),
interface_number=-1,
id_=d.get_path(),
product_key=TREZOR_PRODUCT_KEY,
usage_page=0,
transport_ui_string=d.get_path())
for d in devices]
def create_client(self, device, handler):
try:
self.print_error("connecting to device at", device.path)
transport = trezorlib.transport.get_transport(device.path)
except BaseException as e:
self.print_error("cannot connect at", device.path, str(e))
return None
if not transport:
self.print_error("cannot connect at", device.path)
return
self.print_error("connected to device at", device.path)
# note that this call can still raise!
return TrezorClientBase(transport, handler, self)
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "Bitcoin"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as Qt doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
]
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
model = client.get_trezor_model()
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, model)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.daemon = True
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
traceback.print_exc(file=sys.stderr)
handler.show_error(str(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection, recovery_type = settings
if method == TIM_RECOVER and recovery_type == RECOVERY_TYPE_SCRAMBLED_WORDS:
handler.show_error(_(
"You will be asked to enter 24 words regardless of your "
"seed's actual length. If you enter a word incorrectly or "
"misspell it, you cannot change it or go back - you will need "
"to start again from the beginning.\n\nSo please enter "
"the words carefully!"),
blocking=True)
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if method == TIM_NEW:
client.reset_device(
strength=64 * (item + 2), # 128, 192 or 256
passphrase_protection=passphrase_protection,
pin_protection=pin_protection,
label=label)
elif method == TIM_RECOVER:
client.recover_device(
recovery_type=recovery_type,
word_count=6 * (item + 2), # 12, 18 or 24
passphrase_protection=passphrase_protection,
pin_protection=pin_protection,
label=label)
if recovery_type == RECOVERY_TYPE_MATRIX:
handler.close_matrix_dialog()
else:
raise RuntimeError("Unsupported recovery method")
def _make_node_path(self, xpub, address_n):
_, depth, fingerprint, child_num, chain_code, key = deserialize_xpub(xpub)
node = HDNodeType(
depth=depth,
fingerprint=int.from_bytes(fingerprint, 'big'),
child_num=int.from_bytes(child_num, 'big'),
chain_code=chain_code,
public_key=key,
)
return HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
if client is None:
raise UserFacingException(_('Failed to create a client for this device.') + '\n' +
_('Make sure it is in the correct state.'))
if not client.is_uptodate():
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
raise UserFacingException(msg)
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
is_creating_wallet = purpose == HWD_SETUP_NEW_WALLET
client.get_xpub('m', 'standard', creating=is_creating_wallet)
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_trezor_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return InputScriptType.SPENDWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return InputScriptType.SPENDP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return InputScriptType.SPENDADDRESS
if electrum_txin_type in ('p2sh', ):
return InputScriptType.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_trezor_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return OutputScriptType.PAYTOWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return OutputScriptType.PAYTOP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return OutputScriptType.PAYTOADDRESS
if electrum_txin_type in ('p2sh', ):
return OutputScriptType.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
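# Summary of the two helpers above (illustrative, derived from the code):
#   'p2wpkh', 'p2wsh'           -> SPENDWITNESS      / PAYTOWITNESS
#   'p2wpkh-p2sh', 'p2wsh-p2sh' -> SPENDP2SHWITNESS  / PAYTOP2SHWITNESS
#   'p2pkh'                     -> SPENDADDRESS      / PAYTOADDRESS
#   'p2sh'                      -> SPENDMULTISIG     / PAYTOMULTISIG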
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
prev_tx = { bfh(txhash): self.electrum_tx_to_txtype(tx, xpub_path) for txhash, tx in prev_tx.items() }
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, xpub_path, True)
outputs = self.tx_outputs(keystore.get_derivation(), tx)
details = SignTx(lock_time=tx.locktime, version=tx.version)
signatures, _ = client.sign_tx(self.get_coin_name(), inputs, outputs, details=details, prev_txes=prev_tx)
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
deriv_suffix = wallet.get_address_index(address)
derivation = keystore.derivation
address_path = "%s/%d/%d"%(derivation, *deriv_suffix)
script_type = self.get_trezor_input_script_type(wallet.txin_type)
# prepare multisig, if available:
xpubs = wallet.get_master_public_keys()
if len(xpubs) > 1:
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pairs = sorted(zip(pubkeys, xpubs))
multisig = self._make_multisig(
wallet.m,
[(xpub, deriv_suffix) for _, xpub in sorted_pairs])
else:
multisig = None
client = self.get_client(keystore)
client.show_address(address_path, script_type, multisig)
def tx_inputs(self, tx, xpub_path, for_sig=False):
inputs = []
for txin in tx.inputs():
txinputtype = TxInputType()
if txin['type'] == 'coinbase':
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
xpubs = [parse_xpubkey(x) for x in x_pubkeys]
multisig = self._make_multisig(txin.get('num_sig'), xpubs, txin.get('signatures'))
script_type = self.get_trezor_input_script_type(txin['type'])
txinputtype = TxInputType(
script_type=script_type,
multisig=multisig)
# find which key is mine
for xpub, deriv in xpubs:
if xpub in xpub_path:
xpub_n = parse_path(xpub_path[xpub])
txinputtype.address_n = xpub_n + deriv
break
prev_hash = bfh(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.get('scriptSig') is not None:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def _make_multisig(self, m, xpubs, signatures=None):
if len(xpubs) == 1:
return None
pubkeys = [self._make_node_path(xpub, deriv) for xpub, deriv in xpubs]
if signatures is None:
signatures = [b''] * len(pubkeys)
elif len(signatures) != len(pubkeys):
raise RuntimeError('Mismatched number of signatures')
else:
signatures = [bfh(x)[:-1] if x else b'' for x in signatures]
return MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=signatures,
m=m)
def tx_outputs(self, derivation, tx):
def create_output_by_derivation():
script_type = self.get_trezor_output_script_type(info.script_type)
deriv = parse_path("/%d/%d" % index)
multisig = self._make_multisig(m, [(xpub, deriv) for xpub in xpubs])
txoutputtype = TxOutputType(
multisig=multisig,
amount=amount,
address_n=parse_path(derivation + "/%d/%d" % index),
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = OutputScriptType.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(o)
elif _type == TYPE_ADDRESS:
txoutputtype.script_type = OutputScriptType.PAYTOADDRESS
txoutputtype.address = address
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for o in tx.outputs():
_type, address, amount = o.type, o.address, o.value
use_create_by_derivation = False
info = tx.output_info.get(address)
if info is not None and not has_change:
index, xpubs, m = info.address_index, info.sorted_xpubs, info.num_sig
on_change_branch = index[0] == 1
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address is allowed
# note: ^ restriction can be removed once we require fw
# that has https://github.com/trezor/trezor-mcu/pull/306
if on_change_branch == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx, xpub_path):
t = TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
t.inputs = self.tx_inputs(tx, xpub_path)
t.bin_outputs = [
TxOutputBinType(amount=vout['value'], script_pubkey=bfh(vout['scriptPubKey']))
for vout in d['outputs']
]
return t
|
monitor_test.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing
import os
import pytest
import subprocess
import time
import ray
from ray.test.test_utils import run_and_get_output
def _test_cleanup_on_driver_exit(num_redis_shards):
stdout = run_and_get_output([
"ray",
"start",
"--head",
"--num-redis-shards",
str(num_redis_shards),
])
lines = [m.strip() for m in stdout.split("\n")]
init_cmd = [m for m in lines if m.startswith("ray.init")]
assert 1 == len(init_cmd)
redis_address = init_cmd[0].split("redis_address=\"")[-1][:-2]
def StateSummary():
obj_tbl_len = len(ray.global_state.object_table())
task_tbl_len = len(ray.global_state.task_table())
func_tbl_len = len(ray.global_state.function_table())
return obj_tbl_len, task_tbl_len, func_tbl_len
def Driver(success):
success.value = True
# Start driver.
ray.init(redis_address=redis_address)
summary_start = StateSummary()
if (0, 1) != summary_start[:2]:
success.value = False
max_attempts_before_failing = 100
# Two new objects.
ray.get(ray.put(1111))
ray.get(ray.put(1111))
attempts = 0
while (2, 1, summary_start[2]) != StateSummary():
time.sleep(0.1)
attempts += 1
if attempts == max_attempts_before_failing:
success.value = False
break
@ray.remote
def f():
ray.put(1111) # Yet another object.
return 1111 # A returned object as well.
# 1 new function.
attempts = 0
while (2, 1, summary_start[2] + 1) != StateSummary():
time.sleep(0.1)
attempts += 1
if attempts == max_attempts_before_failing:
success.value = False
break
ray.get(f.remote())
attempts = 0
while (4, 2, summary_start[2] + 1) != StateSummary():
time.sleep(0.1)
attempts += 1
if attempts == max_attempts_before_failing:
success.value = False
break
ray.shutdown()
success = multiprocessing.Value('b', False)
driver = multiprocessing.Process(target=Driver, args=(success, ))
driver.start()
# Wait for client to exit.
driver.join()
time.sleep(3)
# Just make sure Driver() is run and succeeded. Note(rkn), if the below
# assertion starts failing, then the issue may be that the summary
# values computed in the Driver function are being updated slowly and
# so the call to StateSummary() is getting outdated values. This could
# be fixed by looping until StateSummary() returns the desired values.
assert success.value
# Check that objects, tasks, and functions are cleaned up.
ray.init(redis_address=redis_address)
# The assertion below can fail if the monitor is too slow to clean up
# the global state.
assert (0, 1) == StateSummary()[:2]
ray.shutdown()
subprocess.Popen(["ray", "stop"]).wait()
@pytest.mark.skipif(
os.environ.get("RAY_USE_NEW_GCS") == "on",
reason="Hanging with the new GCS API.")
def test_cleanup_on_driver_exit_single_redis_shard():
_test_cleanup_on_driver_exit(num_redis_shards=1)
@pytest.mark.skipif(
os.environ.get("RAY_USE_NEW_GCS") == "on",
reason="Hanging with the new GCS API.")
def test_cleanup_on_driver_exit_many_redis_shards():
_test_cleanup_on_driver_exit(num_redis_shards=5)
_test_cleanup_on_driver_exit(num_redis_shards=31)
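# Illustrative (hypothetical) invocation, assuming `ray` is on PATH and a
# local Redis can be started by `ray start`:
#   pytest monitor_test.py -k cleanup_on_driver_exit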
|
test_sampling.py
|
from functools import partial
import threading
import pickle
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_equal, suppress_warnings
from numpy.lib import NumpyVersion
from scipy.stats import (
TransformedDensityRejection,
DiscreteAliasUrn,
NumericalInversePolynomial
)
from scipy.stats import UNURANError
from scipy import stats
from scipy import special
from scipy.stats import chisquare, cramervonmises
from scipy.stats._distr_params import distdiscrete, distcont
# common test data: this data can be shared between all the tests.
# Normal distribution shared between all the continuous methods
class StandardNormal:
def pdf(self, x):
return np.exp(-0.5 * x*x)
def dpdf(self, x):
return -x * np.exp(-0.5 * x*x)
def cdf(self, x):
return special.ndtr(x)
# A binomial distribution to share between all the discrete methods
class Binomial:
def __init__(self, n, p):
self.n = n
self.p = p
def pmf(self, k):
# include the binomial coefficient so the pmf matches the stats.binom cdf below
return special.binom(self.n, k) * self.p**k * (1-self.p)**(self.n-k)
def cdf(self, k):
k = np.asarray(k)
return stats.binom._cdf(k, self.n, self.p)
def support(self):
return 0, self.n
all_methods = [
("TransformedDensityRejection", {"dist": StandardNormal()}),
("DiscreteAliasUrn", {"dist": [0.02, 0.18, 0.8]}),
("NumericalInversePolynomial", {"dist": StandardNormal()})
]
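# Illustrative sketch of how the (name, kwargs) pairs above get consumed by
# the parametrized tests below (an assumption spelled out, not new API):
#
#   Method = getattr(stats, "TransformedDensityRejection")
#   rng = Method(dist=StandardNormal(), random_state=123)
#   samples = rng.rvs(100)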
# Make sure an internal error occurs in UNU.RAN when invalid callbacks are
# passed. Moreover, different generators throw different error messages.
# So, in case of an `UNURANError`, we do not validate the error message.
bad_pdfs_common = [
# Negative PDF
(lambda x: -x, UNURANError, r"..."),
# Returning wrong type
(lambda x: [], TypeError, r"must be real number, not list"),
# Undefined name inside the function
(lambda x: foo, NameError, r"name 'foo' is not defined"), # type: ignore[name-defined] # noqa
# Infinite value returned => Overflow error.
(lambda x: np.inf, UNURANError, r"..."),
# NaN value => internal error in UNU.RAN
(lambda x: np.nan, UNURANError, r"..."),
# signature of PDF wrong
(lambda: 1.0, TypeError, r"takes 0 positional arguments but 1 was given")
]
# Make sure an internal error occurs in UNU.RAN when invalid callbacks are
# passed. Moreover, different generators throw different error messages.
# So, in case of an `UNURANError`, we do not validate the messages.
bad_dpdf_common = [
# Infinite value returned.
(lambda x: np.inf, UNURANError, r"..."),
# NaN value => internal error in UNU.RAN
(lambda x: np.nan, UNURANError, r"..."),
# Returning wrong type
(lambda x: [], TypeError, r"must be real number, not list"),
# Undefined name inside the function
(lambda x: foo, NameError, r"name 'foo' is not defined"), # type: ignore[name-defined] # noqa
# signature of dPDF wrong
(lambda: 1.0, TypeError, r"takes 0 positional arguments but 1 was given")
]
bad_pv_common = [
([], r"must contain at least one element"),
([[1.0, 0.0]], r"wrong number of dimensions \(expected 1, got 2\)"),
([0.2, 0.4, np.nan, 0.8], r"must contain only finite / non-nan values"),
([0.2, 0.4, np.inf, 0.8], r"must contain only finite / non-nan values"),
([0.0, 0.0], r"must contain at least one non-zero value"),
]
# size of the domains is incorrect
bad_sized_domains = [
# > 2 elements in the domain
((1, 2, 3), ValueError, r"must be a length 2 tuple"),
# empty domain
((), ValueError, r"must be a length 2 tuple")
]
# domain values are incorrect
bad_domains = [
((2, 1), UNURANError, r"left >= right"),
((1, 1), UNURANError, r"left >= right"),
]
# infinite and nan values present in domain.
inf_nan_domains = [
# left >= right
((10, 10), UNURANError, r"left >= right"),
((np.inf, np.inf), UNURANError, r"left >= right"),
((-np.inf, -np.inf), UNURANError, r"left >= right"),
((np.inf, -np.inf), UNURANError, r"left >= right"),
# Also include nans in some of the domains.
((-np.inf, np.nan), ValueError, r"only non-nan values"),
((np.nan, np.inf), ValueError, r"only non-nan values")
]
# `nan` values present in domain. Some distributions don't support
# infinite tails, so don't mix the nan values with infinities.
nan_domains = [
((0, np.nan), ValueError, r"only non-nan values"),
((np.nan, np.nan), ValueError, r"only non-nan values")
]
# all the methods should throw errors for nan, bad sized, and bad valued
# domains.
@pytest.mark.parametrize("domain, err, msg",
bad_domains + bad_sized_domains +
nan_domains) # type: ignore[operator]
@pytest.mark.parametrize("method, kwargs", all_methods)
def test_bad_domain(domain, err, msg, method, kwargs):
Method = getattr(stats, method)
with pytest.raises(err, match=msg):
Method(**kwargs, domain=domain)
@pytest.mark.parametrize("method, kwargs", all_methods)
def test_random_state(method, kwargs):
Method = getattr(stats, method)
# simple seed that works for any version of NumPy
seed = 123
rng1 = Method(**kwargs, random_state=seed)
rng2 = Method(**kwargs, random_state=seed)
assert_equal(rng1.rvs(100), rng2.rvs(100))
# global seed
np.random.seed(123)
rng1 = Method(**kwargs)
rvs1 = rng1.rvs(100)
np.random.seed(None)
rng2 = Method(**kwargs, random_state=123)
rvs2 = rng2.rvs(100)
assert_equal(rvs1, rvs2)
# RandomState seed for old numpy
if NumpyVersion(np.__version__) < '1.19.0':
seed1 = np.random.RandomState(123)
seed2 = 123
rng1 = Method(**kwargs, random_state=seed1)
rng2 = Method(**kwargs, random_state=seed2)
assert_equal(rng1.rvs(100), rng2.rvs(100))
rvs11 = rng1.rvs(550)
rvs12 = rng1.rvs(50)
rvs2 = rng2.rvs(600)
assert_equal(rvs11, rvs2[:550])
assert_equal(rvs12, rvs2[550:])
else: # Generator seed for new NumPy
# when a RandomState is given, it should take the bitgen_t
# member of the class and create a Generator instance.
seed1 = np.random.RandomState(np.random.MT19937(123))
seed2 = np.random.Generator(np.random.MT19937(123))
rng1 = Method(**kwargs, random_state=seed1)
rng2 = Method(**kwargs, random_state=seed2)
assert_equal(rng1.rvs(100), rng2.rvs(100))
def test_set_random_state():
rng1 = TransformedDensityRejection(StandardNormal(), random_state=123)
rng2 = TransformedDensityRejection(StandardNormal())
rng2.set_random_state(123)
assert_equal(rng1.rvs(100), rng2.rvs(100))
rng = TransformedDensityRejection(StandardNormal(), random_state=123)
rvs1 = rng.rvs(100)
rng.set_random_state(123)
rvs2 = rng.rvs(100)
assert_equal(rvs1, rvs2)
def test_threading_behaviour():
# Test if the API is thread-safe.
# This verifies if the lock mechanism and the use of `PyErr_Occurred`
# is correct.
errors = {"err1": None, "err2": None}
class Distribution:
def __init__(self, pdf_msg):
self.pdf_msg = pdf_msg
def pdf(self, x):
if 49.9 < x < 50.0:
raise ValueError(self.pdf_msg)
return x
def dpdf(self, x):
return 1
def func1():
dist = Distribution('foo')
rng = TransformedDensityRejection(dist, domain=(10, 100),
random_state=12)
try:
rng.rvs(100000)
except ValueError as e:
errors['err1'] = e.args[0]
def func2():
dist = Distribution('bar')
rng = TransformedDensityRejection(dist, domain=(10, 100),
random_state=2)
try:
rng.rvs(100000)
except ValueError as e:
errors['err2'] = e.args[0]
t1 = threading.Thread(target=func1)
t2 = threading.Thread(target=func2)
t1.start()
t2.start()
t1.join()
t2.join()
assert errors['err1'] == 'foo'
assert errors['err2'] == 'bar'
@pytest.mark.parametrize("method, kwargs", all_methods)
def test_pickle(method, kwargs):
Method = getattr(stats, method)
rng1 = Method(**kwargs, random_state=123)
obj = pickle.dumps(rng1)
rng2 = pickle.loads(obj)
assert_equal(rng1.rvs(100), rng2.rvs(100))
@pytest.mark.parametrize("size", [None, 0, (0, ), 1, (10, 3), (2, 3, 4, 5),
(0, 0), (0, 1)])
def test_rvs_size(size):
# As the `rvs` method is present in the base class and shared between
# all the classes, we can just test with one of the methods.
rng = TransformedDensityRejection(StandardNormal())
if size is None:
assert np.isscalar(rng.rvs(size))
else:
if np.isscalar(size):
size = (size, )
assert rng.rvs(size).shape == size
def check_cont_samples(rng, dist, mv_ex):
rvs = rng.rvs(100000)
mv = rvs.mean(), rvs.var()
# test the moments only if the variance is finite
if np.isfinite(mv_ex[1]):
assert_allclose(mv, mv_ex, rtol=1e-7, atol=1e-1)
# Cramer Von Mises test for goodness-of-fit
rvs = rng.rvs(500)
dist.cdf = np.vectorize(dist.cdf)
pval = cramervonmises(rvs, dist.cdf).pvalue
assert pval > 0.1
def check_discr_samples(rng, pv, mv_ex):
rvs = rng.rvs(100000)
# test if the first few moments match
mv = rvs.mean(), rvs.var()
assert_allclose(mv, mv_ex, rtol=1e-3, atol=1e-1)
# normalize
pv = pv / pv.sum()
# chi-squared test for goodness-of-fit
obs_freqs = np.zeros_like(pv)
_, freqs = np.unique(rvs, return_counts=True)
freqs = freqs / freqs.sum()
obs_freqs[:freqs.size] = freqs
pval = chisquare(obs_freqs, pv).pvalue
assert pval > 0.1
class TestTransformedDensityRejection:
# Simple Custom Distribution
class dist0:
def pdf(self, x):
return 3/4 * (1-x*x)
def dpdf(self, x):
return 3/4 * (-2*x)
def cdf(self, x):
return 3/4 * (x - x**3/3 + 2/3)
def support(self):
return -1, 1
# Standard Normal Distribution
class dist1:
def pdf(self, x):
return stats.norm._pdf(x / 0.1)
def dpdf(self, x):
return -x / 0.01 * stats.norm._pdf(x / 0.1)
def cdf(self, x):
return stats.norm._cdf(x / 0.1)
# pdf with piecewise linear function as transformed density
# with T = -1/sqrt with shift. Taken from UNU.RAN test suite
# (from file t_tdr_ps.c)
class dist2:
def __init__(self, shift):
self.shift = shift
def pdf(self, x):
x -= self.shift
y = 1. / (abs(x) + 1.)
return 0.5 * y * y
def dpdf(self, x):
x -= self.shift
y = 1. / (abs(x) + 1.)
y = y * y * y
return y if (x < 0.) else -y
def cdf(self, x):
x -= self.shift
if x <= 0.:
return 0.5 / (1. - x)
else:
return 1. - 0.5 / (1. + x)
dists = [dist0(), dist1(), dist2(0.), dist2(10000.)]
# exact mean and variance of the distributions in the list dists
mv0 = [0., 4./15.]
mv1 = [0., 0.01]
mv2 = [0., np.inf]
mv3 = [10000., np.inf]
mvs = [mv0, mv1, mv2, mv3]
@pytest.mark.parametrize("dist, mv_ex",
zip(dists, mvs))
def test_basic(self, dist, mv_ex):
with suppress_warnings() as sup:
# filter the warnings thrown by UNU.RAN
sup.filter(RuntimeWarning)
rng = TransformedDensityRejection(dist, random_state=42)
check_cont_samples(rng, dist, mv_ex)
# PDF 0 everywhere => bad construction points
bad_pdfs = [(lambda x: 0, UNURANError, r"50 : bad construction points.")]
bad_pdfs += bad_pdfs_common # type: ignore[arg-type]
@pytest.mark.parametrize("pdf, err, msg", bad_pdfs)
def test_bad_pdf(self, pdf, err, msg):
class dist:
pass
dist.pdf = pdf
dist.dpdf = lambda x: 1 # an arbitrary dPDF
with pytest.raises(err, match=msg):
TransformedDensityRejection(dist)
@pytest.mark.parametrize("dpdf, err, msg", bad_dpdf_common)
def test_bad_dpdf(self, dpdf, err, msg):
class dist:
pass
dist.pdf = lambda x: x
dist.dpdf = dpdf
with pytest.raises(err, match=msg):
TransformedDensityRejection(dist, domain=(1, 10))
# test domains with inf + nan in them. need to write a custom test for
# this because not all methods support infinite tails.
@pytest.mark.parametrize("domain, err, msg", inf_nan_domains)
def test_inf_nan_domains(self, domain, err, msg):
with pytest.raises(err, match=msg):
TransformedDensityRejection(StandardNormal(), domain=domain)
@pytest.mark.parametrize("construction_points", [-1, 0, 0.1])
def test_bad_construction_points_scalar(self, construction_points):
with pytest.raises(ValueError, match=r"`construction_points` must be "
r"a positive integer."):
TransformedDensityRejection(
StandardNormal(), construction_points=construction_points
)
def test_bad_construction_points_array(self):
# empty array
construction_points = []
with pytest.raises(ValueError, match=r"`construction_points` must "
r"either be a "
r"scalar or a non-empty array."):
TransformedDensityRejection(
StandardNormal(), construction_points=construction_points
)
# construction_points not monotonically increasing
construction_points = [1, 1, 1, 1, 1, 1]
with pytest.warns(RuntimeWarning, match=r"33 : starting points not "
r"strictly monotonically "
r"increasing"):
TransformedDensityRejection(
StandardNormal(), construction_points=construction_points
)
# construction_points containing nans
construction_points = [np.nan, np.nan, np.nan]
with pytest.raises(UNURANError, match=r"50 : bad construction "
r"points."):
TransformedDensityRejection(
StandardNormal(), construction_points=construction_points
)
# construction_points out of domain
construction_points = [-10, 10]
with pytest.warns(RuntimeWarning, match=r"50 : starting point out of "
r"domain"):
TransformedDensityRejection(
StandardNormal(), domain=(-3, 3),
construction_points=construction_points
)
@pytest.mark.parametrize("c", [-1., np.nan, np.inf, 0.1, 1.])
def test_bad_c(self, c):
msg = r"`c` must either be -0.5 or 0."
with pytest.raises(ValueError, match=msg):
TransformedDensityRejection(StandardNormal(), c=c)
def test_bad_variant(self):
msg = r"Invalid option for the `variant`"
with pytest.raises(ValueError, match=msg):
TransformedDensityRejection(StandardNormal(), variant='foo')
u = [np.linspace(0, 1, num=1000), [], [[]], [np.nan],
[-np.inf, np.nan, np.inf], 0,
[[np.nan, 0.5, 0.1], [0.2, 0.4, np.inf], [-2, 3, 4]]]
@pytest.mark.parametrize("u", u)
def test_ppf_hat(self, u):
# Increase the `max_squeeze_hat_ratio` so the ppf_hat is more
# accurate.
rng = TransformedDensityRejection(StandardNormal(),
max_squeeze_hat_ratio=0.9999,
max_intervals=10000)
# Older versions of NumPy throw RuntimeWarnings for comparisons
# with nan.
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "invalid value encountered in greater")
sup.filter(RuntimeWarning, "invalid value encountered in "
"greater_equal")
sup.filter(RuntimeWarning, "invalid value encountered in less")
sup.filter(RuntimeWarning, "invalid value encountered in "
"less_equal")
res = rng.ppf_hat(u)
expected = stats.norm.ppf(u)
assert_allclose(res, expected, rtol=1e-3, atol=1e-5)
assert res.shape == expected.shape
def test_bad_dist(self):
# Empty distribution
class dist:
...
msg = r"`pdf` required but not found."
with pytest.raises(ValueError, match=msg):
TransformedDensityRejection(dist)
# dPDF not present in dist
class dist:
pdf = lambda x: 1-x*x # noqa: E731
msg = r"`dpdf` required but not found."
with pytest.raises(ValueError, match=msg):
TransformedDensityRejection(dist)
class TestDiscreteAliasUrn:
# DAU fails on these probably because of large domains and small
# computation errors in PMF. Mean/SD match but chi-squared test fails.
basic_fail_dists = {
'nchypergeom_fisher', # numerical errors on tails
'nchypergeom_wallenius', # numerical errors on tails
'randint' # fails on 32-bit ubuntu
}
@pytest.mark.parametrize("distname, params", distdiscrete)
def test_basic(self, distname, params):
if distname in self.basic_fail_dists:
msg = ("DAU fails on these probably because of large domains "
"and small computation errors in PMF.")
pytest.skip(msg)
if not isinstance(distname, str):
dist = distname
else:
dist = getattr(stats, distname)
dist = dist(*params)
domain = dist.support()
if not np.isfinite(domain[1] - domain[0]):
# DAU only works with finite domain. So, skip the distributions
# with infinite tails.
pytest.skip("DAU only works with a finite domain.")
k = np.arange(domain[0], domain[1]+1)
pv = dist.pmf(k)
mv_ex = dist.stats('mv')
rng = DiscreteAliasUrn(dist, random_state=42)
check_discr_samples(rng, pv, mv_ex)
# Can't use bad_pmf_common here as we evaluate PMF early on to avoid
# unhelpful errors from UNU.RAN.
bad_pmf = [
# inf returned
(lambda x: np.inf, ValueError,
r"must contain only finite / non-nan values"),
# nan returned
(lambda x: np.nan, ValueError,
r"must contain only finite / non-nan values"),
# all zeros
(lambda x: 0.0, ValueError,
r"must contain at least one non-zero value"),
# Undefined name inside the function
(lambda x: foo, NameError, # type: ignore[name-defined] # noqa
r"name 'foo' is not defined"),
# Returning wrong type.
(lambda x: [], ValueError,
r"setting an array element with a sequence."),
# probabilities < 0
(lambda x: -x, UNURANError,
r"50 : probability < 0"),
# signature of PMF wrong
(lambda: 1.0, TypeError,
r"takes 0 positional arguments but 1 was given")
]
@pytest.mark.parametrize("pmf, err, msg", bad_pmf)
def test_bad_pmf(self, pmf, err, msg):
class dist:
pass
dist.pmf = pmf
with pytest.raises(err, match=msg):
DiscreteAliasUrn(dist, domain=(1, 10))
@pytest.mark.parametrize("pv", [[0.18, 0.02, 0.8],
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0]])
def test_sampling_with_pv(self, pv):
pv = np.asarray(pv, dtype=np.float64)
rng = DiscreteAliasUrn(pv, random_state=123)
rvs = rng.rvs(100_000)
pv = pv / pv.sum()
variates = np.arange(0, len(pv))
# test if the first few moments match
m_expected = np.average(variates, weights=pv)
v_expected = np.average((variates - m_expected) ** 2, weights=pv)
mv_expected = m_expected, v_expected
check_discr_samples(rng, pv, mv_expected)
@pytest.mark.parametrize("pv, msg", bad_pv_common)
def test_bad_pv(self, pv, msg):
with pytest.raises(ValueError, match=msg):
DiscreteAliasUrn(pv)
# DAU doesn't support infinite tails. So, it should throw an error when
# inf is present in the domain.
inf_domain = [(-np.inf, np.inf), (np.inf, np.inf), (-np.inf, -np.inf),
(0, np.inf), (-np.inf, 0)]
@pytest.mark.parametrize("domain", inf_domain)
def test_inf_domain(self, domain):
with pytest.raises(ValueError, match=r"must be finite"):
DiscreteAliasUrn(Binomial(10, 0.2), domain=domain)
def test_bad_urn_factor(self):
with pytest.warns(RuntimeWarning, match=r"relative urn size < 1."):
DiscreteAliasUrn([0.5, 0.5], urn_factor=-1)
def test_bad_args(self):
msg = (r"`domain` must be provided when the "
r"probability vector is not available.")
class dist:
def pmf(self, x):
return x
with pytest.raises(ValueError, match=msg):
DiscreteAliasUrn(dist)
class TestNumericalInversePolynomial:
# Simple Custom Distribution
class dist0:
def pdf(self, x):
return 3/4 * (1-x*x)
def cdf(self, x):
return 3/4 * (x - x**3/3 + 2/3)
def support(self):
return -1, 1
# Standard Normal Distribution
class dist1:
def pdf(self, x):
return stats.norm._pdf(x / 0.1)
def cdf(self, x):
return stats.norm._cdf(x / 0.1)
# Sin 2 distribution
# / 0.05 + 0.45*(1 +sin(2 Pi x)) if |x| <= 1
# f(x) = <
# \ 0 otherwise
# Taken from UNU.RAN test suite (from file t_pinv.c)
class dist2:
def pdf(self, x):
return 0.05 + 0.45 * (1 + np.sin(2*np.pi*x))
def cdf(self, x):
return (0.05*(x + 1) +
0.9*(1. + 2.*np.pi*(1 + x) - np.cos(2.*np.pi*x)) /
(4.*np.pi))
def support(self):
return -1, 1
# Sin 10 distribution
# / 0.05 + 0.45*(1 +sin(2 Pi x)) if |x| <= 5
# f(x) = <
# \ 0 otherwise
# Taken from UNU.RAN test suite (from file t_pinv.c)
class dist3:
def pdf(self, x):
return 0.2 * (0.05 + 0.45 * (1 + np.sin(2*np.pi*x)))
def cdf(self, x):
return x/10. + 0.5 + 0.09/(2*np.pi) * (np.cos(10*np.pi) -
np.cos(2*np.pi*x))
def support(self):
return -5, 5
dists = [dist0(), dist1(), dist2(), dist3()]
# exact mean and variance of the distributions in the list dists
mv0 = [0., 4./15.]
mv1 = [0., 0.01]
mv2 = [-0.45/np.pi, 2/3*0.5 - 0.45**2/np.pi**2]
mv3 = [-0.45/np.pi, 0.2 * 250/3 * 0.5 - 0.45**2/np.pi**2]
mvs = [mv0, mv1, mv2, mv3]
@pytest.mark.parametrize("dist, mv_ex",
zip(dists, mvs))
def test_basic(self, dist, mv_ex):
rng = NumericalInversePolynomial(dist, random_state=42)
check_cont_samples(rng, dist, mv_ex)
very_slow_dists = ['studentized_range', 'trapezoid', 'triang', 'vonmises',
'levy_stable', 'kappa4', 'ksone', 'kstwo', 'levy_l',
'gausshyper', 'anglit']
# for some reason, UNU.RAN segmentation faults for the uniform.
fatal_fail_dists = ['uniform']
# fails for unbounded PDFs
unbounded_pdf_fail_dists = ['beta']
# for these distributions, some assertions fail due to minor
# numerical differences. They can be avoided either by changing
# the seed or by increasing the u_resolution.
fail_dists = ['ncf', 'pareto', 'chi2', 'fatiguelife', 'halfgennorm',
'gilbrat', 'lognorm', 'ncx2', 't']
@pytest.mark.xslow
@pytest.mark.parametrize("distname, params", distcont)
def test_basic_all_scipy_dists(self, distname, params):
if distname in self.very_slow_dists:
pytest.skip(f"PINV too slow for {distname}")
if distname in self.fail_dists:
pytest.skip(f"PINV fails for {distname}")
if distname in self.unbounded_pdf_fail_dists:
pytest.skip("PINV fails for unbounded PDFs.")
if distname in self.fatal_fail_dists:
pytest.xfail(f"PINV segmentation faults for {distname}")
dist = (getattr(stats, distname)
if isinstance(distname, str)
else distname)
dist = dist(*params)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning)
rng = NumericalInversePolynomial(dist, random_state=42)
check_cont_samples(rng, dist, [dist.mean(), dist.var()])
@pytest.mark.parametrize("pdf, err, msg", bad_pdfs_common)
def test_bad_pdf(self, pdf, err, msg):
class dist:
pass
dist.pdf = pdf
with pytest.raises(err, match=msg):
NumericalInversePolynomial(dist, domain=[0, 5])
# test domains with inf + nan in them. need to write a custom test for
# this because not all methods support infinite tails.
@pytest.mark.parametrize("domain, err, msg", inf_nan_domains)
def test_inf_nan_domains(self, domain, err, msg):
with pytest.raises(err, match=msg):
NumericalInversePolynomial(StandardNormal(), domain=domain)
u = [
# test if quantile 0 and 1 return -inf and inf respectively and check
# the correctness of the PPF for equidistant points between 0 and 1.
np.linspace(0, 1, num=10000),
# test the PPF method for empty arrays
[], [[]],
# test if nans and infs return nan result.
[np.nan], [-np.inf, np.nan, np.inf],
# test if a scalar is returned for a scalar input.
0,
# test for arrays with nans, values greater than 1 and less than 0,
# and some valid values.
[[np.nan, 0.5, 0.1], [0.2, 0.4, np.inf], [-2, 3, 4]]
]
@pytest.mark.parametrize("u", u)
def test_ppf(self, u):
dist = StandardNormal()
rng = NumericalInversePolynomial(dist, u_resolution=1e-14)
# Older versions of NumPy throw RuntimeWarnings for comparisons
# with nan.
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "invalid value encountered in greater")
sup.filter(RuntimeWarning, "invalid value encountered in "
"greater_equal")
sup.filter(RuntimeWarning, "invalid value encountered in less")
sup.filter(RuntimeWarning, "invalid value encountered in "
"less_equal")
res = rng.ppf(u)
expected = stats.norm.ppf(u)
assert_allclose(res, expected, rtol=1e-11, atol=1e-11)
assert res.shape == expected.shape
x = [np.linspace(-10, 10, num=10000), [], [[]], [np.nan],
[-np.inf, np.nan, np.inf], 0,
[[np.nan, 0.5, 0.1], [0.2, 0.4, np.inf], [-np.inf, 3, 4]]]
@pytest.mark.parametrize("x", x)
def test_cdf(self, x):
dist = StandardNormal()
rng = NumericalInversePolynomial(dist, keep_cdf=True,
u_resolution=1e-14)
# Older versions of NumPy throw RuntimeWarnings for comparisons
# with nan.
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "invalid value encountered in greater")
sup.filter(RuntimeWarning, "invalid value encountered in "
"greater_equal")
sup.filter(RuntimeWarning, "invalid value encountered in less")
sup.filter(RuntimeWarning, "invalid value encountered in "
"less_equal")
res = rng.cdf(x)
expected = stats.norm.cdf(x)
assert_allclose(res, expected, rtol=1e-11, atol=1e-11)
assert res.shape == expected.shape
def test_u_error(self):
dist = StandardNormal()
rng = NumericalInversePolynomial(dist, u_resolution=1e-10)
max_error, mae = rng.u_error()
assert max_error < 1e-10
assert mae <= max_error
rng = NumericalInversePolynomial(dist, u_resolution=1e-14)
max_error, mae = rng.u_error()
assert max_error < 1e-14
assert mae <= max_error
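    # For intuition (an illustrative sketch, not part of the scipy test suite):
    # the u-error of an approximate inverse CDF is |u - CDF(ppf(u))|, so for the
    # standard normal it could be estimated by hand as, e.g.,
    #     u = np.linspace(1e-6, 1 - 1e-6, num=1000)
    #     u_err = np.max(np.abs(u - stats.norm.cdf(rng.ppf(u))))
    # `u_error()` above reports the maximum and mean absolute u-error estimated
    # internally over `sample_size` points.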
bad_orders = [1, 4.5, 20, np.inf, np.nan]
bad_u_resolution = [1e-20, 1e-1, np.inf, np.nan]
bad_max_intervals = [10, 10000000, 1000.5, np.inf, np.nan]
@pytest.mark.parametrize("order", bad_orders)
def test_bad_orders(self, order):
dist = StandardNormal()
msg = r"`order` must be an integer in the range \[3, 17\]."
with pytest.raises(ValueError, match=msg):
NumericalInversePolynomial(dist, order=order)
@pytest.mark.parametrize("u_resolution", bad_u_resolution)
def test_bad_u_resolution(self, u_resolution):
msg = r"`u_resolution` must be between 1e-15 and 1e-5."
with pytest.raises(ValueError, match=msg):
NumericalInversePolynomial(StandardNormal(),
u_resolution=u_resolution)
@pytest.mark.parametrize("max_intervals", bad_max_intervals)
def test_bad_max_intervals(self, max_intervals):
msg = (r"`max_intervals` must be an integer in the range "
r"\[100, 1000000\].")
with pytest.raises(ValueError, match=msg):
NumericalInversePolynomial(StandardNormal(),
max_intervals=max_intervals)
def test_bad_args(self):
dist = StandardNormal()
rng = NumericalInversePolynomial(dist)
msg = r"CDF is not available."
with pytest.raises(ValueError, match=msg):
rng.cdf([1, 2, 3])
msg = r"`sample_size` must be greater than or equal to 1000."
with pytest.raises(ValueError, match=msg):
rng.u_error(10)
class Distribution:
def pdf(self, x):
return np.exp(-0.5 * x*x)
dist = Distribution()
rng = NumericalInversePolynomial(dist)
msg = r"Exact CDF required but not found."
with pytest.raises(ValueError, match=msg):
rng.u_error()
|
threds.py
|
from threading import Thread
import time
a = 0  # global counter shared (without synchronization) between the two threads
def thread1(threadname):
    # Print the shared counter ten times per second; bump it by 100 once.
    global a
    for k in range(100):
        print("{} {}".format(threadname, a))
        time.sleep(0.1)
        if k == 5:
            a += 100
def thread2(threadname):
    # Increment the shared counter every 0.2 seconds.
    global a
    for k in range(50):
        a += 1
        time.sleep(0.2)
# Note: the Thread objects below rebind the names `thread1` and `thread2`,
# shadowing the functions above; this works because the `target=` references
# are captured before the names are rebound.
thread1 = Thread(target=thread1, args=("Thread-1",))
thread2 = Thread(target=thread2, args=("Thread-2",))
thread1.start()
thread2.start()
thread1.join()
thread2.join()
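# A synchronized variant (illustrative sketch only): `a += 1` is not atomic, so
# a real program would guard the shared counter with a lock, e.g.
#     from threading import Lock
#     lock = Lock()
#     def writer():
#         global a
#         with lock:
#             a += 1
# The demo above omits the lock precisely so the two threads interleave their
# unsynchronized reads and writes of `a`.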
|
tests.py
|
import base64
import threading
from typing import Callable
from typing import Dict
from typing import Optional
from unittest.mock import patch
import warnings
from django.conf import settings
from django.contrib.auth import get_user_model
from django.test import Client
from django.test import override_settings
from django.test import TestCase
from django.urls import reverse
from allianceutils.middleware import CurrentUserMiddleware
from test_allianceutils.tests.middleware.views import reset_thread_wait_barrier
# Compensation for the fact that django or other middleware may do some internal queries
QUERY_COUNT_OVERHEAD = 0
def execute_request(client: object,
url_path: str,
data: Dict[str, str] = {},
thread_count: int = 1,
prehook: Callable = None,
):
"""
    Execute a request, optionally on multiple threads
    :param client: test client to make the request with (may be None if `prehook` supplies one)
    :param url_path: URL path to request
    :param data: POST variables
    :param thread_count: number of threads to create & run this request in
    :param prehook: optional callable invoked in each thread before the request; its return value replaces the client
    :return: a list of responses if `thread_count` is more than 1, otherwise a single response
"""
thread_exceptions = []
thread_responses = []
def do_request(client=None, count=None):
try:
if prehook:
client = prehook(client, count)
response = client.post(path=url_path, data=data)
thread_responses.append(response)
except Exception as ex:
thread_exceptions.append(ex)
raise
if thread_count == 1:
do_request(client, 0)
return thread_responses[0]
threads = [threading.Thread(target=do_request, args=(client, count)) for count in range(thread_count)]
for t in threads:
t.start()
for t in threads:
t.join()
if thread_exceptions:
raise Exception(f'Found {len(thread_exceptions)} exception(s): {thread_exceptions}')
return thread_responses
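# Illustrative usage (a sketch, not exercised directly by the tests below):
# run the same view on several threads at once and collect one response per
# thread, letting the prehook give each thread its own client.
#     responses = execute_request(
#         client=None,
#         url_path=reverse('middleware:current_user'),
#         thread_count=4,
#         prehook=lambda client, count: Client(),
#     )
#     assert len(responses) == 4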
class CurrentUserMiddlewareTestCase(TestCase):
def setUp(self):
self.client = Client()
self.username = 'user@ue.c'
self.password = 'password'
user = get_user_model().objects.create_user(email=self.username, password=self.password)
self.user_id = user.id
self.path = reverse('middleware:current_user')
def tearDown(self):
get_user_model().objects.all().delete()
@override_settings(MIDDLEWARE=settings.MIDDLEWARE + (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'allianceutils.middleware.CurrentUserMiddleware',
))
def test_able_to_get_none_from_middleware_when_anonymous(self):
user = self.client.post(path=self.path).json()['username']
self.assertEqual(user, None)
@override_settings(MIDDLEWARE=settings.MIDDLEWARE + (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'allianceutils.middleware.CurrentUserMiddleware',
))
def test_able_to_get_current_user_from_middleware(self):
self.client.login(username=self.username, password=self.password)
user = self.client.post(path=self.path).json()['username']
self.assertEqual(user, self.username)
@override_settings(MIDDLEWARE=settings.MIDDLEWARE + (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'allianceutils.middleware.CurrentUserMiddleware',
))
def test_able_to_get_current_user_from_middleware_from_respective_threads(self):
def create_user_and_login(client, count):
client = Client()
count = str(count)
get_user_model().objects.create_user(email=count, password=count)
client.login(username=count, password=count)
return client
THREAD_COUNTS = 13
responses = execute_request(
client=None,
url_path=self.path,
thread_count=THREAD_COUNTS,
prehook=create_user_and_login
)
usernames = set([response.json()['username'] for response in responses])
expected_usernames = set([str(i) for i in range(THREAD_COUNTS)])
self.assertEqual(usernames, expected_usernames)
def test_not_active_for_thread(self):
with self.assertRaisesRegex(KeyError, f"Thread .* not already registered with CurrentUserMiddleware"):
CurrentUserMiddleware.get_user()
class QueryCountMiddlewareTestCase(TestCase):
def setUp(self):
# If we don't use the same client for each request then the middleware is recreated each time
        # (i.e. it's as if every request is running in a new process)
self.client = Client()
def assert_warning_count(self,
expected_warnings: int,
expected_log_warnings: int,
count: int,
set_threshold: Optional[int]=None,
throw_exception: bool=False,
thread_count: int = 1,
):
"""
Make a request that executes `count` queries, and validate that the expected number of
warnings is generated
        :param expected_warnings: number of warnings expected via the Python `warnings` module
        :param expected_log_warnings: number of warnings expected via the Python `logging` module
        :param count: number of queries to run
:param set_threshold: override request.QUERY_COUNT_WARNING_THRESHOLD to this value
:param throw_exception: whether the request should throw an exception before returning
:param thread_count: number of threads to create & run this request in
"""
data = {
'count': str(int(count)),
'throw_exception': str(throw_exception),
}
if set_threshold is not None:
data['set_threshold'] = str(set_threshold)
reset_thread_wait_barrier(thread_count if thread_count > 1 else 0)
try:
with warnings.catch_warnings(record=True) as w:
with patch('allianceutils.middleware.query_count.logger.warning', autospec=True) as mock_logger_warning:
warnings.simplefilter('always')
execute_request(
client=self.client,
url_path=reverse('middleware:run_queries'),
data=data,
thread_count=thread_count,
)
self.assertEqual(len(w), expected_warnings)
self.assertEqual(expected_log_warnings, mock_logger_warning.call_count)
finally:
reset_thread_wait_barrier(0)
def test_do_nothing(self):
"""
Ensure no false positives
"""
self.assert_warning_count(0, 0, settings.QUERY_COUNT_WARNING_THRESHOLD / 2)
@override_settings(MIDDLEWARE=settings.MIDDLEWARE + ('allianceutils.middleware.QueryCountMiddleware',))
def test_not_exceeded(self):
"""
Queries less than threshold
"""
self.assert_warning_count(0, 0, settings.QUERY_COUNT_WARNING_THRESHOLD - QUERY_COUNT_OVERHEAD - 1)
self.assert_warning_count(0, 0, settings.QUERY_COUNT_WARNING_THRESHOLD - QUERY_COUNT_OVERHEAD - 1)
@override_settings(MIDDLEWARE=settings.MIDDLEWARE + ('allianceutils.middleware.QueryCountMiddleware',))
def test_exceeded(self):
"""
Queries more than threshold
"""
self.assert_warning_count(0, 0, settings.QUERY_COUNT_WARNING_THRESHOLD - QUERY_COUNT_OVERHEAD - 1)
self.assert_warning_count(0, 1, settings.QUERY_COUNT_WARNING_THRESHOLD - QUERY_COUNT_OVERHEAD)
self.assert_warning_count(0, 1, settings.QUERY_COUNT_WARNING_THRESHOLD - QUERY_COUNT_OVERHEAD + settings.QUERY_COUNT_WARNING_THRESHOLD)
self.assert_warning_count(0, 1, settings.QUERY_COUNT_WARNING_THRESHOLD - QUERY_COUNT_OVERHEAD)
self.assert_warning_count(0, 0, settings.QUERY_COUNT_WARNING_THRESHOLD - QUERY_COUNT_OVERHEAD - 1)
@override_settings(
MIDDLEWARE=settings.MIDDLEWARE + (
'allianceutils.middleware.QueryCountMiddleware',
'allianceutils.middleware.QueryCountMiddleware',
),
)
def test_duplicate_middleware(self):
"""
Middleware included more than once;
- first copy should work as normal
- second copy should give a warning that it is a duplicate
"""
self.assert_warning_count(1, 0, 0)
self.assert_warning_count(1, 1, settings.QUERY_COUNT_WARNING_THRESHOLD * 2)
@override_settings(MIDDLEWARE=settings.MIDDLEWARE + ('allianceutils.middleware.QueryCountMiddleware',))
def test_increase_query_count(self):
"""
        The query count threshold can be temporarily increased
"""
self.assert_warning_count(0, 0, settings.QUERY_COUNT_WARNING_THRESHOLD - QUERY_COUNT_OVERHEAD + 9, set_threshold=settings.QUERY_COUNT_WARNING_THRESHOLD + 10)
self.assert_warning_count(0, 1, settings.QUERY_COUNT_WARNING_THRESHOLD - QUERY_COUNT_OVERHEAD + 10,set_threshold=settings.QUERY_COUNT_WARNING_THRESHOLD + 10)
self.assert_warning_count(0, 0, settings.QUERY_COUNT_WARNING_THRESHOLD - QUERY_COUNT_OVERHEAD + 9, set_threshold=settings.QUERY_COUNT_WARNING_THRESHOLD + 10)
@override_settings(MIDDLEWARE=settings.MIDDLEWARE + ('allianceutils.middleware.QueryCountMiddleware',))
def test_disable_query_count(self):
"""
Query count threshold can be temporarily disabled
"""
self.assert_warning_count(0, 0, settings.QUERY_COUNT_WARNING_THRESHOLD * 2, set_threshold='')
self.assert_warning_count(0, 1, settings.QUERY_COUNT_WARNING_THRESHOLD * 2)
self.assert_warning_count(0, 0, settings.QUERY_COUNT_WARNING_THRESHOLD * 2, set_threshold='')
@override_settings(MIDDLEWARE=settings.MIDDLEWARE + ('allianceutils.middleware.QueryCountMiddleware',))
def test_exception(self):
"""
QueryCountMiddleware works even if an exception is thrown
"""
self.assert_warning_count(0, 0, settings.QUERY_COUNT_WARNING_THRESHOLD - QUERY_COUNT_OVERHEAD - 1)
self.assert_warning_count(0, 1, settings.QUERY_COUNT_WARNING_THRESHOLD - QUERY_COUNT_OVERHEAD)
self.assert_warning_count(0, 1, settings.QUERY_COUNT_WARNING_THRESHOLD - QUERY_COUNT_OVERHEAD, throw_exception=True)
self.assert_warning_count(0, 0, settings.QUERY_COUNT_WARNING_THRESHOLD - QUERY_COUNT_OVERHEAD - 1, throw_exception=True)
self.assert_warning_count(0, 0, settings.QUERY_COUNT_WARNING_THRESHOLD - QUERY_COUNT_OVERHEAD - 1)
self.assert_warning_count(0, 1, settings.QUERY_COUNT_WARNING_THRESHOLD - QUERY_COUNT_OVERHEAD)
@override_settings(MIDDLEWARE=settings.MIDDLEWARE + ('allianceutils.middleware.QueryCountMiddleware',))
def test_query_count_threaded(self):
"""
test that QueryCountMiddleware works correctly with a multithreaded wsgi server
"""
# because the queries are running in a different thread, there's an extra query as part of creating a new DB connection
# for extra fun, it only happens on the *second* request
thread_count = 4
# django runs some extra queries for each new thread
# the exact number depends on the django version
url_path = reverse('middleware:query_overhead')
responses = execute_request(client=self.client, url_path=url_path, thread_count=thread_count)
request_overheads = [r.json()['data'] for r in responses]
self.assertEqual(len(set(request_overheads)), 1)
request_overhead = request_overheads[0]
query_count_threshold = settings.QUERY_COUNT_WARNING_THRESHOLD - request_overhead - QUERY_COUNT_OVERHEAD - 1
self.assert_warning_count(0, 0, 0, thread_count=thread_count)
self.assert_warning_count(0, 0, query_count_threshold, thread_count=thread_count)
self.assert_warning_count(0, thread_count, query_count_threshold + 1, thread_count=thread_count)
self.assert_warning_count(0, 0, query_count_threshold, thread_count=thread_count)
self.assert_warning_count(0, thread_count, query_count_threshold + 1, thread_count=thread_count)
class HttpAuthMiddlewareTestCase(TestCase):
username = 'TehLocalFooties'
password = 'Toazted:Mushr0m'
def setUp(self):
self.client = Client()
@override_settings(HTTP_AUTH_USERNAME=username, HTTP_AUTH_PASSWORD=password)
def test_site_accessible_without_middleware(self):
resp = self.client.get(path="/")
self.assertEqual(resp.status_code, 404)
@override_settings(MIDDLEWARE=settings.MIDDLEWARE + ('allianceutils.middleware.HttpAuthMiddleware',))
def test_site_accessible_with_middleware_but_no_config(self):
resp = self.client.get(path="/")
self.assertEqual(resp.status_code, 404)
@override_settings(MIDDLEWARE=settings.MIDDLEWARE + ('allianceutils.middleware.HttpAuthMiddleware',), HTTP_AUTH_USERNAME=username, HTTP_AUTH_PASSWORD=password)
def test_site_inaccessible_without_any_auth(self):
resp = self.client.get(path="/")
self.assertEqual(resp.status_code, 401)
@override_settings(MIDDLEWARE=settings.MIDDLEWARE + ('allianceutils.middleware.HttpAuthMiddleware',), HTTP_AUTH_USERNAME=username, HTTP_AUTH_PASSWORD=password)
def test_site_inaccessible_with_incorrect_auth(self):
auth_headers = {'HTTP_AUTHORIZATION': 'Basic ' + str(base64.b64encode(b'a:b'), 'utf-8')}
resp = self.client.get(path="/", **auth_headers)
self.assertEqual(resp.status_code, 401)
@override_settings(MIDDLEWARE=settings.MIDDLEWARE + ('allianceutils.middleware.HttpAuthMiddleware',), HTTP_AUTH_USERNAME=username, HTTP_AUTH_PASSWORD=password)
def test_site_accessible_with_correct_auth(self):
auth_headers = {'HTTP_AUTHORIZATION': 'Basic ' + str(base64.b64encode(f'{self.username}:{self.password}'.encode()), 'utf-8')}
resp = self.client.get(path="/", **auth_headers)
self.assertEqual(resp.status_code, 404)
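# For reference (illustrative only): the HTTP_AUTHORIZATION headers above are
# standard HTTP Basic auth, i.e. "Basic " followed by base64("username:password"):
#     creds = f'{HttpAuthMiddlewareTestCase.username}:{HttpAuthMiddlewareTestCase.password}'
#     header = 'Basic ' + base64.b64encode(creds.encode()).decode()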
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from typing import Optional, TYPE_CHECKING, Sequence, List, Union
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor, QFont
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal, QPoint
from PyQt5.QtCore import QTimer
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QMenuBar, QFileDialog, QCheckBox, QLabel,
QVBoxLayout, QGridLayout, QLineEdit,
QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
QShortcut, QMainWindow, QCompleter, QInputDialog,
QWidget, QSizePolicy, QStatusBar, QToolTip, QDialog,
QMenu, QAction, QStackedWidget, QToolButton)
import electrum_grs
from electrum_grs.gui import messages
from electrum_grs import (keystore, ecc, constants, util, bitcoin, commands,
paymentrequest, lnutil)
from electrum_grs.bitcoin import COIN, is_address
from electrum_grs.plugin import run_hook, BasePlugin
from electrum_grs.i18n import _
from electrum_grs.util import (format_time,
UserCancelled, profiler,
bh2u, bfh, InvalidPassword,
UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter,
InvalidBitcoinURI, maybe_extract_bolt11_invoice, NotEnoughFunds,
NoDynamicFeeEstimates,
AddTransactionException, BITCOIN_BIP21_URI_SCHEME,
InvoiceError, parse_max_spend)
from electrum_grs.invoices import PR_TYPE_ONCHAIN, PR_TYPE_LN, PR_DEFAULT_EXPIRATION_WHEN_CREATING, Invoice
from electrum_grs.invoices import PR_PAID, PR_FAILED, pr_expiration_values, LNInvoice, OnchainInvoice
from electrum_grs.transaction import (Transaction, PartialTxInput,
PartialTransaction, PartialTxOutput)
from electrum_grs.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption,
CannotDoubleSpendTx, CannotCPFP)
from electrum_grs.version import ELECTRUM_VERSION
from electrum_grs.network import (Network, TxBroadcastError, BestEffortRequestFailed,
UntrustedServerReturnedError, NetworkException)
from electrum_grs.exchange_rate import FxThread
from electrum_grs.simple_config import SimpleConfig
from electrum_grs.logging import Logger
from electrum_grs.lnutil import ln_dummy_address, extract_nodeid, ConnStringFormatError
from electrum_grs.lnaddr import lndecode, LnInvoiceException
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, FreezableLineEdit, FeerateEdit, SizedFreezableLineEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider, FeeComboBox
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton,
import_meta_gui, export_meta_gui,
filename_field, address_field, char_width_in_lineedit, webopen,
TRANSACTION_FILE_EXTENSION_FILTER_ANY, MONOSPACE_FONT,
getOpenFileName, getSaveFileName, BlockingWaitingDialog)
from .util import ButtonsTextEdit, ButtonsLineEdit
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread
from .channels_list import ChannelsList
from .confirm_tx_dialog import ConfirmTxDialog
from .transaction_dialog import PreviewTxDialog
from .rbf_dialog import BumpFeeDialog, DSCancelDialog
from .qrreader import scan_qrcode
if TYPE_CHECKING:
from . import ElectrumGui
LN_NUM_PAYMENT_ATTEMPTS = 10
class StatusBarButton(QToolButton):
# note: this class has a custom stylesheet applied in stylesheet_patcher.py
def __init__(self, icon, tooltip, func):
QToolButton.__init__(self)
self.setText('')
self.setIcon(icon)
self.setToolTip(tooltip)
self.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
self.setAutoRaise(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
self.setCursor(QCursor(Qt.PointingHandCursor))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() in [Qt.Key_Return, Qt.Key_Enter]:
self.func()
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
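# Illustrative use of @protected (a sketch; the decorated methods of
# ElectrumWindow below follow the same pattern): the wrapper prompts for and
# verifies the wallet password, then injects it as the `password` keyword.
#     @protected
#     def do_something_sensitive(self, arg, password=None):
#         ...  # password is None when the wallet has no keystore encryption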
class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
#ln_payment_attempt_signal = pyqtSignal(str)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
show_error_signal = pyqtSignal(str)
payment_request: Optional[paymentrequest.PaymentRequest]
def __init__(self, gui_object: 'ElectrumGui', wallet: Abstract_Wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config # type: SimpleConfig
self.gui_thread = gui_object.gui_thread
assert wallet, "no wallet"
self.wallet = wallet
if wallet.has_lightning():
self.wallet.config.set_key('show_channels_tab', True)
Exception_Hook.maybe_setup(config=self.config, wallet=self.wallet)
self.network = gui_object.daemon.network # type: Network
self.fx = gui_object.daemon.fx # type: FxThread
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self._cleaned_up = False
self.payment_request = None # type: Optional[paymentrequest.PaymentRequest]
self.payto_URI = None
self.checking_accounts = False
self.qr_window = None
self.pluginsdialog = None
self.showing_cert_mismatch_error = False
self.tl_windows = []
self.pending_invoice = None
Logger.__init__(self)
self.tx_notification_queue = queue.Queue()
self.tx_notification_last_time = 0
self.create_status_bar()
self.need_update = threading.Event()
self.completions = QStringListModel()
coincontrol_sb = self.create_coincontrol_statusbar()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
self.channels_tab = self.create_channels_tab()
tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.channels_tab, read_QIcon("lightning.png"), _("Channels"), "channels")
add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
central_widget = QScrollArea()
vbox = QVBoxLayout(central_widget)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.addWidget(tabs)
vbox.addWidget(coincontrol_sb)
self.setCentralWidget(central_widget)
self.setMinimumWidth(640)
self.setMinimumHeight(400)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(read_QIcon("electrum.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("F5"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.show_error_signal.connect(self.show_error)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'new_transaction', 'status',
'banner', 'verified', 'fee', 'fee_histogram', 'on_quotes',
'on_history', 'channel', 'channels_updated',
'payment_failed', 'payment_succeeded',
'invoice_status', 'request_status', 'ln_gossip_sync_progress',
'cert_mismatch', 'gossip_db_loaded']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
util.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
# update fee slider in case we missed the callback
#self.fee_slider.update()
self.load_wallet(wallet)
gui_object.timer.timeout.connect(self.timer_actions)
self.fetch_alias()
# If the option hasn't been set yet
if config.get('check_updates') is None:
choice = self.question(title="Electrum-GRS - " + _("Enable update check"),
msg=_("For security reasons we advise that you always use the latest version of Electrum.") + " " +
_("Would you like to be notified when there is a newer version of Electrum available?"))
config.set_key('check_updates', bool(choice), save=True)
self._update_check_thread = None
if config.get('check_updates', False):
# The references to both the thread and the window need to be stored somewhere
# to prevent GC from getting in our way.
def on_version_received(v):
if UpdateCheck.is_newer(v):
self.update_check_button.setText(_("Update to Electrum {} is available").format(v))
self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
self.update_check_button.show()
self._update_check_thread = UpdateCheckThread()
self._update_check_thread.checked.connect(on_version_received)
self._update_check_thread.start()
def run_coroutine_from_thread(self, coro, on_result=None):
def task():
try:
f = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
r = f.result()
if on_result:
on_result(r)
except Exception as e:
self.logger.exception("exception in coro scheduled via window.wallet")
self.show_error_signal.emit(str(e))
self.wallet.thread.add(task)
def on_fx_history(self):
self.history_model.refresh('fx_history')
self.address_list.update()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_model.refresh('fx_quotes')
self.address_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
#return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name())
return self.wallet.diagnostic_name()
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
e = exc_info[1]
if isinstance(e, UserCancelled):
pass
elif isinstance(e, UserFacingException):
self.show_error(str(e))
else:
# TODO would be nice if we just sent these to the crash reporter...
# anything we don't want to send there, we should explicitly catch
# send_exception_to_crash_reporter(e)
try:
self.logger.error("on_error", exc_info=exc_info)
except OSError:
pass # see #4418
self.show_error(repr(e))
def on_network(self, event, *args):
# Handle in GUI thread
self.network_signal.emit(event, args)
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
# note: all windows get events from all wallets!
if event == 'wallet_updated':
wallet = args[0]
if wallet == self.wallet:
self.need_update.set()
elif event == 'network_updated':
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
self.network_signal.emit('status', None)
elif event == 'blockchain_updated':
# to update number of confirmations in history
self.need_update.set()
elif event == 'new_transaction':
wallet, tx = args
if wallet == self.wallet:
self.tx_notification_queue.put(tx)
elif event == 'on_quotes':
self.on_fx_quotes()
elif event == 'on_history':
self.on_fx_history()
elif event == 'gossip_db_loaded':
self.channels_list.gossip_db_loaded.emit(*args)
elif event == 'channels_updated':
wallet = args[0]
if wallet == self.wallet:
self.channels_list.update_rows.emit(*args)
elif event == 'channel':
wallet = args[0]
if wallet == self.wallet:
self.channels_list.update_single_row.emit(*args)
self.update_status()
elif event == 'request_status':
self.on_request_status(*args)
elif event == 'invoice_status':
self.on_invoice_status(*args)
elif event == 'payment_succeeded':
wallet = args[0]
if wallet == self.wallet:
self.on_payment_succeeded(*args)
elif event == 'payment_failed':
wallet = args[0]
if wallet == self.wallet:
self.on_payment_failed(*args)
elif event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
wallet, tx_hash, tx_mined_status = args
if wallet == self.wallet:
self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
elif event == 'fee':
pass
elif event == 'fee_histogram':
self.history_model.on_fee_histogram()
elif event == 'ln_gossip_sync_progress':
self.update_lightning_icon()
elif event == 'cert_mismatch':
self.show_cert_mismatch_error()
else:
self.logger.info(f"unexpected network event: {event} {args}")
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
def close_wallet(self):
if self.wallet:
self.logger.info(f'close_wallet {self.wallet.storage.path}')
run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet: Abstract_Wallet):
wallet.thread = TaskThread(self, self.on_error)
self.update_recently_visited(wallet.storage.path)
if wallet.has_lightning():
util.trigger_callback('channels_updated', wallet)
self.need_update.set()
        # Once the GUI has been initialized, check whether there is anything to announce, since callbacks may have fired before the GUI existed
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.channels_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.db.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.logger.info("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum-GRS"
if constants.net.TESTNET:
name += " " + constants.net.NET_NAME.capitalize()
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.db.get('wallet_type', '?')]
if self.wallet.is_watching_only():
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Bitcoins with it."),
_("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Watch-only wallet'))
def warn_if_testnet(self):
if not constants.net.TESTNET:
return
# user might have opted out already
if self.config.get('dont_show_testnet_warning', False):
return
# only show once per process lifecycle
if getattr(self.gui_object, '_warned_testnet', False):
return
self.gui_object._warned_testnet = True
msg = ''.join([
_("You are in testnet mode."), ' ',
_("Testnet coins are worthless."), '\n',
_("Testnet is separate from the main Bitcoin network. It is used for testing.")
])
cb = QCheckBox(_("Don't show this again."))
cb_checked = False
def on_cb(x):
nonlocal cb_checked
cb_checked = x == Qt.Checked
cb.stateChanged.connect(on_cb)
self.show_warning(msg, title=_('Testnet'), checkbox=cb)
if cb_checked:
self.config.set_key('dont_show_testnet_warning', True)
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def select_backup_dir(self, b):
name = self.config.get('backup_dir', '')
dirname = QFileDialog.getExistingDirectory(self, "Select your wallet backup directory", name)
if dirname:
self.config.set_key('backup_dir', dirname)
self.backup_dir_e.setText(dirname)
def backup_wallet(self):
d = WindowModalDialog(self, _("File Backup"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
backup_help = ""
backup_dir = self.config.get('backup_dir')
backup_dir_label = HelpLabel(_('Backup directory') + ':', backup_help)
msg = _('Please select a backup directory')
if self.wallet.has_lightning() and self.wallet.lnworker.channels:
msg += '\n\n' + ' '.join([
_("Note that lightning channels will be converted to channel backups."),
_("You cannot use channel backups to perform lightning payments."),
_("Channel backups can only be used to request your channels to be closed.")
])
self.backup_dir_e = QPushButton(backup_dir)
self.backup_dir_e.clicked.connect(self.select_backup_dir)
grid.addWidget(backup_dir_label, 1, 0)
grid.addWidget(self.backup_dir_e, 1, 1)
vbox.addLayout(grid)
vbox.addWidget(WWLabel(msg))
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return False
backup_dir = self.config.get_backup_dir()
if backup_dir is None:
self.show_message(_("You need to configure a backup directory in your preferences"), title=_("Backup not configured"))
return
try:
new_path = self.wallet.save_backup(backup_dir)
except BaseException as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
return
msg = _("A copy of your wallet file was created in")+" '%s'" % str(new_path)
self.show_message(msg, title=_("Wallet backup created"))
return True
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.wallet.storage.path))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename = get_new_wallet_name(wallet_folder)
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save backup"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_wallet_info)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
addresses_menu = wallet_menu.addMenu(_("&Addresses"))
addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
history_menu = wallet_menu.addMenu(_("&History"))
history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
history_menu.addAction(_("&Summary"), self.history_list.show_summary)
history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.import_invoices())
invoices_menu.addAction(_("Export"), lambda: self.export_invoices())
requests_menu = wallet_menu.addMenu(_("Requests"))
requests_menu.addAction(_("Import"), lambda: self.import_requests())
requests_menu.addAction(_("Export"), lambda: self.export_requests())
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.channels_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools")) # type: QMenu
preferences_action = tools_menu.addAction(_("Preferences"), self.settings_dialog) # type: QAction
if sys.platform == 'darwin':
# "Settings"/"Preferences" are all reserved keywords in macOS.
# preferences_action will get picked up based on name (and put into a standardized location,
# and given a standard reserved hotkey)
# Hence, this menu item will be at a "uniform location re macOS processes"
preferences_action.setMenuRole(QAction.PreferencesRole) # make sure OS recognizes it as preferences
# Add another preferences item, to also have a "uniform location for Electrum between different OSes"
tools_menu.addAction(_("Electrum preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), self.gui_object.show_network_dialog).setEnabled(bool(self.network))
if self.network and self.network.local_watchtower:
tools_menu.addAction(_("Local &Watchtower"), self.gui_object.show_watchtower_dialog)
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Check for updates"), self.show_update_check)
help_menu.addAction(_("&Official website"), lambda: webopen("https://groestlcoin.org"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webopen("https://www.groestlcoin.org/forum/")).setShortcut(QKeySequence.HelpContents)
# if not constants.net.TESTNET:
# help_menu.addAction(_("&Bitcoin Paper"), self.show_bitcoin_paper)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters().server.host
self.pay_to_URI('groestlcoin:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electrum-GRS",
(_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
_("Electrum's focus is speed, with low resource usage and simplifying Bitcoin.") + " " +
_("You do not need to perform regular backups, because your wallet can be "
"recovered from a secret phrase that you can memorize or write on paper.") + " " +
_("Startup times are instant because it operates in conjunction with high-performance "
"servers that handle the most complicated parts of the Bitcoin system.") + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_bitcoin_paper(self):
filename = os.path.join(self.config.path, 'bitcoin.pdf')
if not os.path.exists(filename):
s = self._fetch_tx_from_network("54e48e5f5c656b26c3bca14a8c95aa583d07ebe84dde3b7dd4a78f4e4186e713")
if not s:
return
s = s.split("0100000000000000")[1:-1]
out = ''.join(x[6:136] + x[138:268] + x[270:400] if len(x) > 136 else x[6:] for x in s)[16:-20]
with open(filename, 'wb') as f:
f.write(bytes.fromhex(out))
webopen('file:///' + filename)
def show_update_check(self, version=None):
self.gui_object._update_check = UpdateCheck(latest_version=version)
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
f'''<a href="{constants.GIT_REPO_ISSUES_URL}">{constants.GIT_REPO_ISSUES_URL}</a><br/><br/>''',
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum-GRS - " + _("Reporting Bugs"), rich_text=True)
def notify_transactions(self):
if self.tx_notification_queue.qsize() == 0:
return
if not self.wallet.up_to_date:
return # no notifications while syncing
now = time.time()
rate_limit = 20 # seconds
if self.tx_notification_last_time + rate_limit > now:
return
self.tx_notification_last_time = now
self.logger.info("Notifying GUI about new transactions")
txns = []
while True:
try:
txns.append(self.tx_notification_queue.get_nowait())
except queue.Empty:
break
# Combine the transactions if there are at least three
if len(txns) >= 3:
total_amount = 0
for tx in txns:
tx_wallet_delta = self.wallet.get_wallet_delta(tx)
if not tx_wallet_delta.is_relevant:
continue
total_amount += tx_wallet_delta.delta
self.notify(_("{} new transactions: Total amount received in the new transactions {}")
.format(len(txns), self.format_amount_and_units(total_amount)))
else:
for tx in txns:
tx_wallet_delta = self.wallet.get_wallet_delta(tx)
if not tx_wallet_delta.is_relevant:
continue
self.notify(_("New transaction: {}").format(self.format_amount_and_units(tx_wallet_delta.delta)))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electrum-GRS", message, read_QIcon("electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electrum-GRS", message, QSystemTrayIcon.Information, 20000)
def timer_actions(self):
self.request_list.refresh_status()
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
elif not self.wallet.up_to_date:
# this updates "synchronizing" progress
self.update_status()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
self.notify_transactions()
def format_amount(self, amount_sat, is_diff=False, whitespaces=False) -> str:
"""Formats amount as string, converting to desired unit.
E.g. 500_000 -> '0.005'
"""
return self.config.format_amount(amount_sat, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount_sat, *, timestamp: int = None) -> str:
"""Returns string with both groestlcoin and fiat amounts, in desired units.
E.g. 500_000 -> '0.005 GRS (0.00042 EUR)'
"""
text = self.config.format_amount_and_units(amount_sat)
fiat = self.fx.format_amount_and_units(amount_sat, timestamp=timestamp) if self.fx else None
if text and fiat:
text += f' ({fiat})'
return text
def format_fiat_and_units(self, amount_sat) -> str:
"""Returns string of FX fiat amount, in desired units.
E.g. 500_000 -> '0.00042 EUR'
"""
return self.fx.format_amount_and_units(amount_sat) if self.fx else ''
def format_fee_rate(self, fee_rate):
return self.config.format_fee_rate(fee_rate)
def get_decimal_point(self):
return self.config.get_decimal_point()
def base_unit(self):
return self.config.get_base_unit()
def connect_fields(self, window, btc_e, fiat_e, fee_e):
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
if rate.is_nan() or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
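    # For reference (illustrative arithmetic only): with an exchange rate of
    # 20_000 fiat units per coin, typing "100" into the fiat field sets the
    # coin field to int(Decimal('100') / Decimal(20_000) * COIN) = 500_000
    # base units, and editing the coin field drives the fiat field through the
    # inverse conversion above.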
def update_status(self):
if not self.wallet:
return
if self.network is None:
text = _("Offline")
icon = read_QIcon("status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
text = ("{} ({}/{})"
.format(_("Synchronizing..."), num_answered, num_sent))
icon = read_QIcon("status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = read_QIcon("status_lagging%s.png"%fork_str)
else:
c, u, x = self.wallet.get_balance()
text = _("Balance") + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
if self.wallet.has_lightning():
l = self.wallet.lnworker.get_balance()
text += u' \U000026a1 %s'%(self.format_amount_and_units(l).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = read_QIcon("status_connected%s.png"%fork_str)
else:
icon = read_QIcon("status_connected_proxy%s.png"%fork_str)
else:
if self.network.proxy:
text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
else:
text = _("Not connected")
icon = read_QIcon("status_disconnected.png")
if self.tray:
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
if self.status_button:
self.status_button.setIcon(icon)
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self, wallet=None):
if wallet is None:
wallet = self.wallet
if wallet != self.wallet:
return
self.history_model.refresh('update_tabs')
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.channels_list.update_rows.emit(wallet)
self.update_completions()
def create_channels_tab(self):
self.channels_list = ChannelsList(self)
t = self.channels_list.get_toolbar()
return self.create_list_tab(self.channels_list, t)
def create_history_tab(self):
self.history_model = HistoryModel(self)
self.history_list = l = HistoryList(self, self.history_model)
self.history_model.set_view(self.history_list)
l.searchable_list = l
toolbar = l.create_toolbar(self.config)
tab = self.create_list_tab(l, toolbar)
toolbar_shown = bool(self.config.get('show_toolbar_history', False))
l.show_toolbar(toolbar_shown)
return tab
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_channel(self, channel_id):
from . import channel_details
channel_details.ChannelDetailsDialog(self, channel_id).show()
def show_transaction(self, tx, *, tx_desc=None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, parent=self, desc=tx_desc)
def show_lightning_transaction(self, tx_item):
from .lightning_tx_dialog import LightningTxDialog
d = LightningTxDialog(self, tx_item)
d.show()
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_message_e = SizedFreezableLineEdit(width=700)
grid.addWidget(QLabel(_('Description')), 0, 0)
grid.addWidget(self.receive_message_e, 0, 1, 1, 4)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 1, 0)
grid.addWidget(self.receive_amount_e, 1, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 1, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.connect_fields(self, self.amount_e, self.fiat_send_e, None)
self.expires_combo = QComboBox()
evl = sorted(pr_expiration_values.items())
evl_keys = [i[0] for i in evl]
evl_values = [i[1] for i in evl]
default_expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
try:
i = evl_keys.index(default_expiry)
except ValueError:
i = 0
self.expires_combo.addItems(evl_values)
self.expires_combo.setCurrentIndex(i)
def on_expiry(i):
self.config.set_key('request_expiry', evl_keys[i])
self.expires_combo.currentIndexChanged.connect(on_expiry)
msg = ''.join([
_('Expiration date of your request.'), ' ',
_('This information is seen by the recipient if you send them a signed payment request.'),
'\n\n',
_('For on-chain requests, the address gets reserved until expiration. After that, it might get reused.'), ' ',
_('The bitcoin address never expires and will always be part of this electrum wallet.'), ' ',
_('You can reuse a bitcoin address any number of times but it is not good for your privacy.'),
'\n\n',
_('For Lightning requests, payments will not be accepted after the expiration.'),
])
grid.addWidget(HelpLabel(_('Expires after') + ' (?)', msg), 2, 0)
grid.addWidget(self.expires_combo, 2, 1)
self.expires_label = QLineEdit('')
        self.expires_label.setReadOnly(True)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 2, 1)
self.clear_invoice_button = QPushButton(_('Clear'))
self.clear_invoice_button.clicked.connect(self.clear_receive_tab)
self.create_invoice_button = QPushButton(_('New Address'))
self.create_invoice_button.setIcon(read_QIcon("groestlcoin.png"))
self.create_invoice_button.setToolTip('Create on-chain request')
self.create_invoice_button.clicked.connect(lambda: self.create_invoice(False))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_invoice_button)
buttons.addWidget(self.create_invoice_button)
if self.wallet.has_lightning():
self.create_lightning_invoice_button = QPushButton(_('Lightning'))
self.create_lightning_invoice_button.setToolTip('Create lightning request')
self.create_lightning_invoice_button.setIcon(read_QIcon("lightning.png"))
self.create_lightning_invoice_button.clicked.connect(lambda: self.create_invoice(True))
buttons.addWidget(self.create_lightning_invoice_button)
grid.addLayout(buttons, 4, 0, 1, -1)
self.receive_payreq_e = ButtonsTextEdit()
self.receive_payreq_e.setFont(QFont(MONOSPACE_FONT))
self.receive_payreq_e.addCopyButton(self.app)
self.receive_payreq_e.setReadOnly(True)
self.receive_payreq_e.textChanged.connect(self.update_receive_qr)
self.receive_payreq_e.setFocusPolicy(Qt.ClickFocus)
self.receive_qr = QRCodeWidget(fixedSize=220)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_address_e = ButtonsTextEdit()
self.receive_address_e.setFont(QFont(MONOSPACE_FONT))
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
self.receive_address_e.textChanged.connect(self.update_receive_address_styling)
qr_show = lambda: self.show_qrcode(str(self.receive_address_e.text()), _('Receiving address'), parent=self)
qr_icon = "qrcode_white.png" if ColorScheme.dark_scheme else "qrcode.png"
self.receive_address_e.addButton(qr_icon, qr_show, _("Show as QR code"))
self.receive_requests_label = QLabel(_('Receive queue'))
from .request_list import RequestList
self.request_list = RequestList(self)
receive_tabs = QTabWidget()
receive_tabs.addTab(self.receive_address_e, _('Address'))
receive_tabs.addTab(self.receive_payreq_e, _('Request'))
receive_tabs.addTab(self.receive_qr, _('QR Code'))
receive_tabs.setCurrentIndex(self.config.get('receive_tabs_index', 0))
receive_tabs.currentChanged.connect(lambda i: self.config.set_key('receive_tabs_index', i))
receive_tabs_sp = receive_tabs.sizePolicy()
receive_tabs_sp.setRetainSizeWhenHidden(True)
receive_tabs.setSizePolicy(receive_tabs_sp)
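        # Show the Address/Request/QR widgets only once a request exists;
        # retaining their size while hidden keeps the layout from jumping.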
def maybe_hide_receive_tabs():
receive_tabs.setVisible(bool(self.receive_payreq_e.text()))
self.receive_payreq_e.textChanged.connect(maybe_hide_receive_tabs)
maybe_hide_receive_tabs()
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addStretch()
hbox.addWidget(receive_tabs)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_requests(self, keys):
for key in keys:
self.wallet.delete_request(key)
self.request_list.update()
self.clear_receive_tab()
def delete_lightning_payreq(self, payreq_key):
self.wallet.lnworker.delete_invoice(payreq_key)
self.request_list.update()
self.invoice_list.update()
self.clear_receive_tab()
def sign_payment_request(self, addr):
alias = self.config.get('alias')
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(repr(e))
return
else:
return
def create_invoice(self, is_lightning: bool):
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
try:
if is_lightning:
if not self.wallet.lnworker.channels:
self.show_error(_("You need to open a Lightning channel first."))
return
# TODO maybe show a warning if amount exceeds lnworker.num_sats_can_receive (as in kivy)
key = self.wallet.lnworker.add_request(amount, message, expiry)
else:
key = self.create_bitcoin_request(amount, message, expiry)
if not key:
return
self.address_list.update()
except InvoiceError as e:
self.show_error(_('Error creating payment request') + ':\n' + str(e))
return
assert key is not None
self.request_list.update()
self.request_list.select_key(key)
# clear request fields
self.receive_amount_e.setText('')
self.receive_message_e.setText('')
# copy to clipboard
r = self.wallet.get_request(key)
content = r.invoice if r.is_lightning() else r.get_address()
title = _('Invoice') if is_lightning else _('Address')
self.do_copy(content, title=title)
def create_bitcoin_request(self, amount: int, message: str, expiration: int) -> Optional[str]:
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic(): # imported wallet
msg = [
_('No more addresses in your wallet.'), ' ',
_('You are using a non-deterministic wallet, which cannot create new addresses.'), ' ',
_('If you want to create new addresses, use a deterministic wallet instead.'), '\n\n',
_('Creating a new payment request will reuse one of your addresses and overwrite an existing request. Continue anyway?'),
]
if not self.question(''.join(msg)):
return
addr = self.wallet.get_receiving_address()
else: # deterministic wallet
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
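                # the boolean argument selects a receiving (non-change) address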
addr = self.wallet.create_new_address(False)
req = self.wallet.make_payment_request(addr, amount, message, expiration)
try:
self.wallet.add_payment_request(req)
except Exception as e:
self.logger.exception('Error adding payment request')
self.show_error(_('Error adding payment request') + ':\n' + repr(e))
else:
self.sign_payment_request(addr)
return addr
def do_copy(self, content: str, *, title: str = None) -> None:
self.app.clipboard().setText(content)
if title is None:
tooltip_text = _("Text copied to clipboard").format(title)
else:
tooltip_text = _("{} copied to clipboard").format(title)
QToolTip.showText(QCursor.pos(), tooltip_text, self)
def clear_receive_tab(self):
self.receive_payreq_e.setText('')
self.receive_address_e.setText('')
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
self.request_list.clearSelection()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def update_receive_qr(self):
uri = str(self.receive_payreq_e.text())
if maybe_extract_bolt11_invoice(uri):
# encode lightning invoices as uppercase so QR encoding can use
# alphanumeric mode; resulting in smaller QR codes
uri = uri.upper()
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.qrw.setData(uri)
def update_receive_address_styling(self):
addr = str(self.receive_address_e.text())
if is_address(addr) and self.wallet.is_used(addr):
self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
self.receive_address_e.setToolTip(_("This address has already been used. "
"For better privacy, do not reuse it for new payments."))
else:
self.receive_address_e.setStyleSheet("")
self.receive_address_e.setToolTip("")
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
self.payto_e.addPasteButton(self.app)
msg = (_("Recipient of the funds.") + "\n\n"
+ _("You may enter a Bitcoin address, a label from your list of contacts "
"(a list of completions will be proposed), "
"or an alias (email-like address that forwards to a Bitcoin address)") + ". "
+ _("Lightning invoices are also supported.") + "\n\n"
+ _("You can also pay to many outputs in a single transaction, "
"specifying one output per line.") + "\n" + _("Format: address, amount") + "\n"
+ _("To set the amount to 'max', use the '!' special character.") + "\n"
+ _("Integers weights can also be used in conjunction with '!', "
"e.g. set one amount to '2!' and another to '3!' to split your coins 40-60."))
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
        completer.setCaseSensitivity(Qt.CaseInsensitive)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = SizedFreezableLineEdit(width=700)
grid.addWidget(self.message_e, 2, 1, 1, -1)
msg = (_('The amount to be received by the recipient.') + ' '
+ _('Fees are paid by the sender.') + '\n\n'
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' '
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n'
+ _('Keyboard shortcut: type "!" to send all your coins.'))
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 3, 0)
grid.addWidget(self.amount_e, 3, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 3, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(100)
self.max_button.setCheckable(True)
grid.addWidget(self.max_button, 3, 3)
self.save_button = EnterButton(_("Save"), self.do_save_invoice)
self.send_button = EnterButton(_("Pay") + "...", self.do_pay)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.save_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 4)
self.amount_e.shortcut.connect(self.spend_max)
def reset_max(text):
self.max_button.setChecked(False)
enable = not bool(text) and not self.amount_e.isReadOnly()
#self.max_button.setEnabled(enable)
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
self.set_onchain(False)
self.invoices_label = QLabel(_('Send queue'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
hbox.addStretch(1)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
if run_hook('abort_send', self):
return
outputs = self.payto_e.get_outputs(True)
if not outputs:
return
make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
coins=self.get_coins(),
outputs=outputs,
fee=fee_est,
is_sweep=False)
try:
try:
tx = make_tx(None)
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
# Check if we had enough funds excluding fees,
# if so, still provide opportunity to set lower fees.
tx = make_tx(0)
except NotEnoughFunds as e:
self.max_button.setChecked(False)
text = self.get_text_not_enough_funds_mentioning_frozen()
self.show_error(text)
return
self.max_button.setChecked(True)
amount = tx.output_value()
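        # A plugin (e.g. the 2FA service) may add its own fee output via this
        # hook; subtract it so "max" reflects what the recipient actually gets.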
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
self.amount_e.setAmount(amount_after_all_fees)
# show tooltip explaining max amount
mining_fee = tx.get_fee()
mining_fee_str = self.format_amount_and_units(mining_fee)
msg = _("Mining fee: {} (can be adjusted on next screen)").format(mining_fee_str)
if x_fee_amount:
twofactor_fee_str = self.format_amount_and_units(x_fee_amount)
msg += "\n" + _("2fa fee: {} (for the next batch of transactions)").format(twofactor_fee_str)
frozen_bal = self.get_frozen_balance_str()
if frozen_bal:
msg += "\n" + _("Some coins are frozen: {} (can be unfrozen in the Addresses or in the Coins tab)").format(frozen_bal)
QToolTip.showText(self.max_button.mapToGlobal(QPoint(0, 0)), msg)
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
@protected
def protect(self, func, args, password):
return func(*args, password)
def read_outputs(self) -> List[PartialTxOutput]:
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
return outputs
def check_send_tab_onchain_outputs_and_show_errors(self, outputs: List[PartialTxOutput]) -> bool:
"""Returns whether there are errors with outputs.
Also shows error dialog to user if so.
"""
if not outputs:
self.show_error(_('No outputs'))
return True
for o in outputs:
if o.scriptpubkey is None:
self.show_error(_('Groestlcoin Address is None'))
return True
if o.value is None:
self.show_error(_('Invalid Amount'))
return True
return False # no errors
def check_send_tab_payto_line_and_show_errors(self) -> bool:
"""Returns whether there are errors.
Also shows error dialog to user if so.
"""
pr = self.payment_request
if pr:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
return True
if not pr:
errors = self.payto_e.get_errors()
if errors:
if len(errors) == 1 and not errors[0].is_multiline:
err = errors[0]
self.show_warning(_("Failed to parse 'Pay to' line") + ":\n" +
f"{err.line_content[:40]}...\n\n"
f"{err.exc!r}")
else:
self.show_warning(_("Invalid Lines found:") + "\n\n" +
'\n'.join([_("Line #") +
f"{err.idx+1}: {err.line_content[:40]}... ({err.exc!r})"
for err in errors]))
return True
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return True
return False # no errors
def pay_lightning_invoice(self, invoice: str, *, amount_msat: Optional[int]):
if amount_msat is None:
raise Exception("missing amount for LN invoice")
amount_sat = Decimal(amount_msat) / 1000
# FIXME this is currently lying to user as we truncate to satoshis
msg = _("Pay lightning invoice?") + '\n\n' + _("This will send {}?").format(self.format_amount_and_units(amount_sat))
if not self.question(msg):
return
self.save_pending_invoice()
def task():
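            # lnworker runs on the network's asyncio loop; submit the payment
            # coroutine there and block this wallet-thread task until it settles.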
coro = self.wallet.lnworker.pay_invoice(invoice, amount_msat=amount_msat, attempts=LN_NUM_PAYMENT_ATTEMPTS)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
return fut.result()
self.wallet.thread.add(task)
def on_request_status(self, wallet, key, status):
if wallet != self.wallet:
return
req = self.wallet.receive_requests.get(key)
if req is None:
return
if status == PR_PAID:
self.notify(_('Payment received') + '\n' + key)
self.need_update.set()
else:
self.request_list.update_item(key, req)
def on_invoice_status(self, wallet, key):
if wallet != self.wallet:
return
invoice = self.wallet.get_invoice(key)
if invoice is None:
return
status = self.wallet.get_invoice_status(invoice)
if status == PR_PAID:
self.invoice_list.update()
else:
self.invoice_list.update_item(key, invoice)
def on_payment_succeeded(self, wallet, key):
description = self.wallet.get_label(key)
self.notify(_('Payment succeeded') + '\n\n' + description)
self.need_update.set()
def on_payment_failed(self, wallet, key, reason):
self.show_error(_('Payment failed') + '\n\n' + reason)
def read_invoice(self):
if self.check_send_tab_payto_line_and_show_errors():
return
try:
if not self._is_onchain:
invoice_str = self.payto_e.lightning_invoice
if not invoice_str:
return
if not self.wallet.has_lightning():
self.show_error(_('Lightning is disabled'))
return
invoice = LNInvoice.from_bech32(invoice_str)
if invoice.get_amount_msat() is None:
amount_sat = self.amount_e.get_amount()
if amount_sat:
invoice.amount_msat = int(amount_sat * 1000)
else:
self.show_error(_('No amount'))
return
return invoice
else:
outputs = self.read_outputs()
if self.check_send_tab_onchain_outputs_and_show_errors(outputs):
return
message = self.message_e.text()
return self.wallet.create_invoice(
outputs=outputs,
message=message,
pr=self.payment_request,
URI=self.payto_URI)
except InvoiceError as e:
self.show_error(_('Error creating payment') + ':\n' + str(e))
def do_save_invoice(self):
self.pending_invoice = self.read_invoice()
if not self.pending_invoice:
return
self.save_pending_invoice()
def save_pending_invoice(self):
if not self.pending_invoice:
return
self.do_clear()
self.wallet.save_invoice(self.pending_invoice)
self.invoice_list.update()
self.pending_invoice = None
def do_pay(self):
self.pending_invoice = self.read_invoice()
if not self.pending_invoice:
return
self.do_pay_invoice(self.pending_invoice)
def pay_multiple_invoices(self, invoices):
outputs = []
for invoice in invoices:
outputs += invoice.outputs
self.pay_onchain_dialog(self.get_coins(), outputs)
def do_pay_invoice(self, invoice: 'Invoice'):
if invoice.type == PR_TYPE_LN:
assert isinstance(invoice, LNInvoice)
self.pay_lightning_invoice(invoice.invoice, amount_msat=invoice.get_amount_msat())
elif invoice.type == PR_TYPE_ONCHAIN:
assert isinstance(invoice, OnchainInvoice)
self.pay_onchain_dialog(self.get_coins(), invoice.outputs)
else:
raise Exception('unknown invoice type')
def get_coins(self, *, nonlocal_only=False) -> Sequence[PartialTxInput]:
coins = self.get_manually_selected_coins()
if coins is not None:
return coins
else:
return self.wallet.get_spendable_coins(None, nonlocal_only=nonlocal_only)
def get_manually_selected_coins(self) -> Optional[Sequence[PartialTxInput]]:
"""Return a list of selected coins or None.
Note: None means selection is not being used,
while an empty sequence means the user specifically selected that.
"""
return self.utxo_list.get_spend_list()
def get_text_not_enough_funds_mentioning_frozen(self) -> str:
text = _("Not enough funds")
frozen_str = self.get_frozen_balance_str()
if frozen_str:
text += " ({} {})".format(
frozen_str, _("are frozen")
)
return text
def get_frozen_balance_str(self) -> Optional[str]:
frozen_bal = sum(self.wallet.get_frozen_balance())
if not frozen_bal:
return None
return self.format_amount_and_units(frozen_bal)
def pay_onchain_dialog(
self, inputs: Sequence[PartialTxInput],
outputs: List[PartialTxOutput], *,
external_keypairs=None) -> None:
# trustedcoin requires this
if run_hook('abort_send', self):
return
is_sweep = bool(external_keypairs)
make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
coins=inputs,
outputs=outputs,
fee=fee_est,
is_sweep=is_sweep)
output_values = [x.value for x in outputs]
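        # Any '!' (max-spend) output makes the total fee-dependent, so show '!'
        # in the confirmation dialog instead of a fixed sum.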
if any(parse_max_spend(outval) for outval in output_values):
output_value = '!'
else:
output_value = sum(output_values)
conf_dlg = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=output_value, is_sweep=is_sweep)
if conf_dlg.not_enough_funds:
# Check if we had enough funds excluding fees,
# if so, still provide opportunity to set lower fees.
if not conf_dlg.have_enough_funds_assuming_zero_fees():
text = self.get_text_not_enough_funds_mentioning_frozen()
self.show_message(text)
return
# shortcut to advanced preview (after "enough funds" check!)
if self.config.get('advanced_preview'):
preview_dlg = PreviewTxDialog(
window=self,
make_tx=make_tx,
external_keypairs=external_keypairs,
output_value=output_value)
preview_dlg.show()
return
cancelled, is_send, password, tx = conf_dlg.run()
if cancelled:
return
if is_send:
self.save_pending_invoice()
def sign_done(success):
if success:
self.broadcast_or_show(tx)
self.sign_tx_with_password(tx, callback=sign_done, password=password,
external_keypairs=external_keypairs)
else:
preview_dlg = PreviewTxDialog(
window=self,
make_tx=make_tx,
external_keypairs=external_keypairs,
output_value=output_value)
preview_dlg.show()
def broadcast_or_show(self, tx: Transaction):
if not tx.is_complete():
self.show_transaction(tx)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
self.show_transaction(tx)
return
self.broadcast_transaction(tx)
@protected
def sign_tx(self, tx, *, callback, external_keypairs, password):
self.sign_tx_with_password(tx, callback=callback, password=password, external_keypairs=external_keypairs)
def sign_tx_with_password(self, tx: PartialTransaction, *, callback, password, external_keypairs=None):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_success(result):
callback(True)
def on_failure(exc_info):
self.on_error(exc_info)
callback(False)
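        # A plugin (e.g. the trustedcoin 2FA hook) may wrap the callbacks to
        # insert its own co-signing step before reporting success.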
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
if external_keypairs:
# can sign directly
task = partial(tx.sign, external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
msg = _('Signing transaction...')
WaitingDialog(self, msg, task, on_success, on_failure)
def broadcast_transaction(self, tx: Transaction):
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Invoice has expired")
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
return False, e.get_message_for_gui()
except BestEffortRequestFailed as e:
return False, repr(e)
# success
txid = tx.txid()
if pr:
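                # BIP70 flow: notify the merchant's payment server and wait
                # (with a bounded timeout) for its payment ACK.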
self.payment_request = None
refund_address = self.wallet.get_receiving_address()
coro = pr.send_payment_and_receive_paymentack(tx.serialize(), refund_address)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
ack_status, ack_msg = fut.result(timeout=20)
self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
return True, txid
# Capture current TL window; override might be removed on return
parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
def broadcast_done(result):
# GUI thread
if result:
success, msg = result
if success:
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
else:
msg = msg or ''
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def mktx_for_open_channel(self, *, funding_sat, node_id):
coins = self.get_coins(nonlocal_only=True)
make_tx = lambda fee_est: self.wallet.lnworker.mktx_for_open_channel(
coins=coins,
funding_sat=funding_sat,
node_id=node_id,
fee_est=fee_est)
return make_tx
def open_channel(self, connect_str, funding_sat, push_amt):
try:
node_id, rest = extract_nodeid(connect_str)
except ConnStringFormatError as e:
self.show_error(str(e))
return
if self.wallet.lnworker.has_conflicting_backup_with(node_id):
msg = messages.MGS_CONFLICTING_BACKUP_INSTANCE
if not self.question(msg):
return
# use ConfirmTxDialog
# we need to know the fee before we broadcast, because the txid is required
make_tx = self.mktx_for_open_channel(funding_sat=funding_sat, node_id=node_id)
d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=funding_sat, is_sweep=False)
# disable preview button because the user must not broadcast tx before establishment_flow
d.preview_button.setEnabled(False)
cancelled, is_send, password, funding_tx = d.run()
if not is_send:
return
if cancelled:
return
# read funding_sat from tx; converts '!' to int value
funding_sat = funding_tx.output_value_for_address(ln_dummy_address())
def task():
return self.wallet.lnworker.open_channel(
connect_str=connect_str,
funding_tx=funding_tx,
funding_sat=funding_sat,
push_amt_sat=push_amt,
password=password)
def on_failure(exc_info):
type_, e, traceback = exc_info
self.show_error(_('Could not open channel: {}').format(repr(e)))
WaitingDialog(self, _('Opening channel...'), task, self.on_open_channel_success, on_failure)
def on_open_channel_success(self, args):
chan, funding_tx = args
lnworker = self.wallet.lnworker
if not chan.has_onchain_backup():
backup_dir = self.config.get_backup_dir()
if backup_dir is not None:
                self.show_message(_('Your wallet backup has been updated in {}').format(backup_dir))
else:
data = lnworker.export_channel_backup(chan.channel_id)
help_text = _(messages.MSG_CREATED_NON_RECOVERABLE_CHANNEL)
self.show_qrcode(
data, _('Save channel backup'),
help_text=help_text,
show_copy_text_btn=True)
n = chan.constraints.funding_txn_minimum_depth
message = '\n'.join([
_('Channel established.'),
_('Remote peer ID') + ':' + chan.node_id.hex(),
_('This channel will be usable after {} confirmations').format(n)
])
if not funding_tx.is_complete():
message += '\n\n' + _('Please sign and broadcast the funding transaction')
self.show_message(message)
self.show_transaction(funding_tx)
else:
self.show_message(message)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b: bool) -> None:
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.message_e]:
e.setFrozen(True)
self.lock_amount(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoices(self, keys):
for key in keys:
self.wallet.delete_invoice(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
if not pr:
return
key = pr.get_id()
invoice = self.wallet.get_invoice(key)
if invoice and self.wallet.get_invoice_status(invoice) == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setAmount(pr.get_amount())
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
pr = self.payment_request
if not pr:
return
self.show_message(pr.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request: 'paymentrequest.PaymentRequest'):
self.set_onchain(True)
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def set_ln_invoice(self, invoice: str):
"""Parse ln invoice, and prepare the send tab for it."""
try:
lnaddr = lndecode(invoice)
except LnInvoiceException as e:
self.show_error(_("Error parsing Lightning invoice") + f":\n{e}")
return
pubkey = bh2u(lnaddr.pubkey.serialize())
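        # The BOLT-11 'd' tag carries the invoice description; the for/else
        # falls back to an empty string when no such tag is present.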
for k,v in lnaddr.tags:
if k == 'd':
description = v
break
else:
description = ''
self.payto_e.setFrozen(True)
self.payto_e.setText(pubkey)
self.payto_e.lightning_invoice = invoice
self.message_e.setText(description)
if lnaddr.get_amount_sat() is not None:
self.amount_e.setAmount(lnaddr.get_amount_sat())
self.set_onchain(False)
def set_onchain(self, b):
self._is_onchain = b
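        # the Max ('!') spend shortcut only applies to on-chain payments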
self.max_button.setEnabled(b)
def set_bip21(self, text: str):
try:
out = util.parse_URI(text, self.on_pr)
except InvalidBitcoinURI as e:
self.show_error(_("Error parsing URI") + f":\n{e}")
return
self.payto_URI = out
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
def pay_to_URI(self, text: str):
if not text:
return
# first interpret as lightning invoice
bolt11_invoice = maybe_extract_bolt11_invoice(text)
if bolt11_invoice:
self.set_ln_invoice(bolt11_invoice)
else:
self.set_bip21(text)
# update fiat amount
self.amount_e.textEdited.emit("")
self.show_send_tab()
def do_clear(self):
self.max_button.setChecked(False)
self.payment_request = None
self.payto_URI = None
self.payto_e.is_pr = False
self.set_onchain(False)
for e in [self.payto_e, self.message_e, self.amount_e]:
e.setText('')
e.setFrozen(False)
self.update_status()
run_hook('do_clear', self)
def set_frozen_state_of_addresses(self, addrs, freeze: bool):
self.wallet.set_frozen_state_of_addresses(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
def set_frozen_state_of_coins(self, utxos: Sequence[PartialTxInput], freeze: bool):
utxos_str = {utxo.prevout.to_str() for utxo in utxos}
self.wallet.set_frozen_state_of_coins(utxos_str, freeze)
self.utxo_list.update()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
#vbox.setContentsMargins(0, 0, 0, 0)
#vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
toolbar = l.create_toolbar(self.config)
tab = self.create_list_tab(l, toolbar)
toolbar_shown = bool(self.config.get('show_toolbar_addresses', False))
l.show_toolbar(toolbar_shown)
return tab
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = UTXOList(self)
return self.create_list_tab(self.utxo_list)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if not self.question(_("Do you want to remove {} from your wallet?").format(addr)):
return
try:
self.wallet.delete_address(addr)
except UserFacingException as e:
self.show_error(str(e))
else:
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_onchain_invoice(self, invoice: OnchainInvoice):
amount_str = self.format_amount(invoice.amount_sat) + ' ' + self.base_unit()
d = WindowModalDialog(self, _("Onchain Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
grid.addWidget(QLabel(amount_str), 1, 1)
if len(invoice.outputs) == 1:
grid.addWidget(QLabel(_("Address") + ':'), 2, 0)
grid.addWidget(QLabel(invoice.get_address()), 2, 1)
else:
            outputs_str = '\n'.join(map(lambda x: x.address + ' : ' + self.format_amount(x.value) + ' ' + self.base_unit(), invoice.outputs))
grid.addWidget(QLabel(_("Outputs") + ':'), 2, 0)
grid.addWidget(QLabel(outputs_str), 2, 1)
grid.addWidget(QLabel(_("Description") + ':'), 3, 0)
grid.addWidget(QLabel(invoice.message), 3, 1)
if invoice.exp:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(invoice.exp + invoice.time)), 4, 1)
if invoice.bip70:
pr = paymentrequest.PaymentRequest(bytes.fromhex(invoice.bip70))
pr.verify(self.contacts)
grid.addWidget(QLabel(_("Requestor") + ':'), 5, 0)
grid.addWidget(QLabel(pr.get_requestor()), 5, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 6, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 6, 1)
def do_export():
key = pr.get_id()
name = str(key) + '.bip70'
fn = getSaveFileName(
parent=self,
title=_("Save invoice to file"),
filename=name,
filter="*.bip70",
config=self.config,
)
if not fn:
return
                with open(fn, 'wb') as f:
                    f.write(pr.raw)
self.show_message(_('BIP70 invoice saved as {}').format(fn))
exportButton = EnterButton(_('Export'), do_export)
buttons = Buttons(exportButton, CloseButton(d))
else:
buttons = Buttons(CloseButton(d))
vbox.addLayout(grid)
vbox.addLayout(buttons)
d.exec_()
def show_lightning_invoice(self, invoice: LNInvoice):
lnaddr = lndecode(invoice.invoice)
d = WindowModalDialog(self, _("Lightning Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Node ID") + ':'), 0, 0)
grid.addWidget(QLabel(lnaddr.pubkey.serialize().hex()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
amount_str = self.format_amount(invoice.get_amount_sat()) + ' ' + self.base_unit()
grid.addWidget(QLabel(amount_str), 1, 1)
grid.addWidget(QLabel(_("Description") + ':'), 2, 0)
grid.addWidget(QLabel(invoice.message), 2, 1)
grid.addWidget(QLabel(_("Hash") + ':'), 3, 0)
payhash_e = ButtonsLineEdit(lnaddr.paymenthash.hex())
payhash_e.addCopyButton(self.app)
payhash_e.setReadOnly(True)
vbox.addWidget(payhash_e)
grid.addWidget(payhash_e, 3, 1)
if invoice.exp:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(invoice.time + invoice.exp)), 4, 1)
vbox.addLayout(grid)
invoice_e = ShowQRTextEdit(config=self.config)
invoice_e.addCopyButton(self.app)
invoice_e.setText(invoice.invoice)
vbox.addWidget(invoice_e)
vbox.addLayout(Buttons(CloseButton(d),))
d.exec_()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.wallet.db.get("qt-console-history", [])
console.history_index = len(console.history)
console.updateNamespace({
'wallet': self.wallet,
'network': self.network,
'plugins': self.gui_object.plugins,
'window': self,
'config': self.config,
'electrum': electrum_grs,
'daemon': self.gui_object.daemon,
'util': util,
'bitcoin': bitcoin,
'lnutil': lnutil,
})
c = commands.Commands(
config=self.config,
daemon=self.gui_object.daemon,
network=self.network,
callback=lambda: self.console.set_json(True))
methods = {}
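        # Expose each Commands method in the console, bound to this window's
        # wallet and able to prompt for the password when a command needs it.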
def mkfunc(f, method):
return lambda *args, **kwargs: f(method,
args,
self.password_dialog,
**{**kwargs, 'wallet': self.wallet})
for m in dir(c):
if m[0]=='_' or m in ['network','wallet','config','daemon']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
self.balance_label = QLabel("Loading wallet...")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.update_check_button = QPushButton("")
self.update_check_button.setFlat(True)
self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
self.update_check_button.setIcon(read_QIcon("update.png"))
self.update_check_button.hide()
sb.addPermanentWidget(self.update_check_button)
self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog)
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog))
self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog)
sb.addPermanentWidget(self.seed_button)
self.lightning_button = StatusBarButton(read_QIcon("lightning.png"), _("Lightning Network"), self.gui_object.show_lightning_dialog)
sb.addPermanentWidget(self.lightning_button)
self.update_lightning_icon()
self.status_button = None
if self.network:
self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), self.gui_object.show_network_dialog)
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def create_coincontrol_statusbar(self):
self.coincontrol_sb = sb = QStatusBar()
sb.setSizeGripEnabled(False)
#sb.setFixedHeight(3 * char_width_in_lineedit())
sb.setStyleSheet('QStatusBar::item {border: None;} '
+ ColorScheme.GREEN.as_stylesheet(True))
self.coincontrol_label = QLabel()
self.coincontrol_label.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
self.coincontrol_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
sb.addWidget(self.coincontrol_label)
clear_cc_button = EnterButton(_('Reset'), lambda: self.utxo_list.set_spend_list(None))
clear_cc_button.setStyleSheet("margin-right: 5px;")
sb.addPermanentWidget(clear_cc_button)
sb.setVisible(False)
return sb
def set_coincontrol_msg(self, msg: Optional[str]) -> None:
if not msg:
self.coincontrol_label.setText("")
self.coincontrol_sb.setVisible(False)
return
self.coincontrol_label.setText(msg)
self.coincontrol_sb.setVisible(True)
def update_lightning_icon(self):
if not self.wallet.has_lightning():
self.lightning_button.setVisible(False)
return
if self.network is None or self.network.channel_db is None:
self.lightning_button.setVisible(False)
return
self.lightning_button.setVisible(True)
cur, total, progress_percent = self.network.lngossip.get_sync_progress_estimate()
# self.logger.debug(f"updating lngossip sync progress estimate: cur={cur}, total={total}")
progress_str = "??%"
if progress_percent is not None:
progress_str = f"{progress_percent}%"
if progress_percent and progress_percent >= 100:
self.lightning_button.setMaximumWidth(25)
self.lightning_button.setText('')
self.lightning_button.setToolTip(_("The Lightning Network graph is fully synced."))
else:
self.lightning_button.setMaximumWidth(25 + 5 * char_width_in_lineedit())
self.lightning_button.setText(progress_str)
self.lightning_button.setToolTip(_("The Lightning Network graph is syncing...\n"
"Payments are more likely to succeed with a more complete graph."))
def update_lock_icon(self):
icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
def change_password_dialog(self):
from electrum_grs.storage import StorageEncryptionVersion
if self.wallet.get_available_storage_encryption_version() == StorageEncryptionVersion.XPUB_PASSWORD:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
self.logger.exception('')
self.show_error(repr(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_storage=encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except BaseException:
self.logger.exception('Failed to update password')
self.show_error(_('Failed to update password'))
return
        msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled; this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(32 * char_width_in_lineedit())
line2 = QLineEdit()
line2.setFixedWidth(32 * char_width_in_lineedit())
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def init_lightning_dialog(self, dialog):
assert not self.wallet.has_lightning()
if self.wallet.can_have_deterministic_lightning():
msg = _(
"Lightning is not enabled because this wallet was created with an old version of Electrum. "
"Create lightning keys?")
else:
            msg = _(
                "Warning: this wallet type does not support channel recovery from seed. "
                "You will need to back up your wallet every time you create a new channel. "
                "Create lightning keys?")
if self.question(msg):
self._init_lightning_dialog(dialog=dialog)
@protected
def _init_lightning_dialog(self, *, dialog, password):
dialog.close()
self.wallet.init_lightning(password=password)
self.update_lightning_icon()
self.show_message(_('Lightning keys have been initialized.'))
def show_wallet_info(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(800, 100)
vbox = QVBoxLayout()
wallet_type = self.wallet.db.get('wallet_type', '')
if self.wallet.is_watching_only():
wallet_type += ' [{}]'.format(_('watching-only'))
seed_available = _('False')
if self.wallet.has_seed():
seed_available = _('True')
ks = self.wallet.keystore
assert isinstance(ks, keystore.Deterministic_KeyStore)
seed_available += f" ({ks.get_seed_type()})"
keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(WWLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(WWLabel(basename), 0, 1)
grid.addWidget(WWLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(WWLabel(wallet_type), 1, 1)
grid.addWidget(WWLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(WWLabel(self.wallet.txin_type), 2, 1)
grid.addWidget(WWLabel(_("Seed available") + ':'), 3, 0)
grid.addWidget(WWLabel(str(seed_available)), 3, 1)
if len(keystore_types) <= 1:
grid.addWidget(WWLabel(_("Keystore type") + ':'), 4, 0)
ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
grid.addWidget(WWLabel(ks_type), 4, 1)
# lightning
grid.addWidget(WWLabel(_('Lightning') + ':'), 5, 0)
from .util import IconLabel
if self.wallet.has_lightning():
if self.wallet.lnworker.has_deterministic_node_id():
grid.addWidget(WWLabel(_('Enabled')), 5, 1)
else:
label = IconLabel(text='Enabled, non-recoverable channels')
label.setIcon(read_QIcon('nocloud'))
grid.addWidget(label, 5, 1)
if self.wallet.db.get('seed_type') == 'segwit':
msg = _("Your channels cannot be recovered from seed, because they were created with an old version of Electrum. "
"This means that you must save a backup of your wallet everytime you create a new channel.\n\n"
"If you want this wallet to have recoverable channels, you must close your existing channels and restore this wallet from seed")
else:
msg = _("Your channels cannot be recovered from seed. "
"This means that you must save a backup of your wallet everytime you create a new channel.\n\n"
"If you want to have recoverable channels, you must create a new wallet with an Electrum seed")
grid.addWidget(HelpButton(msg), 5, 3)
grid.addWidget(WWLabel(_('Lightning Node ID:')), 7, 0)
# TODO: ButtonsLineEdit should have a addQrButton method
nodeid_text = self.wallet.lnworker.node_keypair.pubkey.hex()
nodeid_e = ButtonsLineEdit(nodeid_text)
qr_icon = "qrcode_white.png" if ColorScheme.dark_scheme else "qrcode.png"
nodeid_e.addButton(qr_icon, lambda: self.show_qrcode(nodeid_text, _("Node ID")), _("Show QR Code"))
nodeid_e.addCopyButton(self.app)
nodeid_e.setReadOnly(True)
nodeid_e.setFont(QFont(MONOSPACE_FONT))
grid.addWidget(nodeid_e, 8, 0, 1, 4)
else:
if self.wallet.can_have_lightning():
grid.addWidget(WWLabel('Not enabled'), 5, 1)
button = QPushButton(_("Enable"))
button.pressed.connect(lambda: self.init_lightning_dialog(dialog))
grid.addWidget(button, 5, 3)
else:
grid.addWidget(WWLabel(_("Not available for this wallet.")), 5, 1)
grid.addWidget(HelpButton(_("Lightning is currently restricted to HD wallets with p2wpkh addresses.")), 5, 2)
vbox.addLayout(grid)
labels_clayout = None
if self.wallet.is_deterministic():
keystores = self.wallet.get_keystores()
ks_stack = QStackedWidget()
def select_ks(index):
ks_stack.setCurrentIndex(index)
# only show the combobox in case multiple accounts are available
if len(keystores) > 1:
def label(idx, ks):
if isinstance(self.wallet, Multisig_Wallet) and hasattr(ks, 'label'):
return _("cosigner") + f' {idx+1}: {ks.get_type_text()} {ks.label}'
else:
return _("keystore") + f' {idx+1}'
labels = [label(idx, ks) for idx, ks in enumerate(self.wallet.get_keystores())]
on_click = lambda clayout: select_ks(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Select keystore"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
for ks in keystores:
ks_w = QWidget()
ks_vbox = QVBoxLayout()
ks_vbox.setContentsMargins(0, 0, 0, 0)
ks_w.setLayout(ks_vbox)
mpk_text = ShowQRTextEdit(ks.get_master_public_key(), config=self.config)
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
run_hook('show_xpub_button', mpk_text, ks)
der_path_hbox = QHBoxLayout()
der_path_hbox.setContentsMargins(0, 0, 0, 0)
der_path_hbox.addWidget(WWLabel(_("Derivation path") + ':'))
der_path_text = WWLabel(ks.get_derivation_prefix() or _("unknown"))
der_path_text.setTextInteractionFlags(Qt.TextSelectableByMouse)
der_path_hbox.addWidget(der_path_text)
der_path_hbox.addStretch()
ks_vbox.addWidget(WWLabel(_("Master Public Key")))
ks_vbox.addWidget(mpk_text)
ks_vbox.addLayout(der_path_hbox)
ks_stack.addWidget(ks_w)
select_ks(0)
vbox.addWidget(ks_stack)
vbox.addStretch(1)
btn_export_info = run_hook('wallet_info_buttons', self, dialog)
btn_close = CloseButton(dialog)
btns = Buttons(btn_export_info, btn_close)
vbox.addLayout(btns)
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
r = self.gui_object.daemon.delete_wallet(wallet_path)
self.close()
if r:
self.show_error(_("Wallet removed: {}").format(basename))
else:
self.show_error(_("Wallet file not found: {}").format(basename))
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(repr(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase, config=self.config)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None, *,
help_text=None, show_copy_text_btn=False):
if not data:
return
d = QRDialog(
data=data,
parent=parent or self,
title=title,
help_text=help_text,
show_copy_text_btn=show_copy_text_btn,
config=self.config,
)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk = self.wallet.export_private_key(address, password)
except Exception as e:
self.logger.exception('')
self.show_message(repr(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk, config=self.config)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
signature_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
except BaseException as e:
self.logger.exception('Invalid Public key')
self.show_warning(_('Invalid Public key'))
return
encrypted = public_key.encrypt_message(message)
encrypted_e.setText(encrypted.decode('ascii'))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
encrypted_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, data: Union[str, bytes]) -> Union[None, 'PartialTransaction', 'Transaction']:
from electrum_grs.transaction import tx_from_any
try:
return tx_from_any(data)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + repr(e))
return
def import_channel_backup(self, encrypted: str):
if not self.question('Import channel backup?'):
return
try:
self.wallet.lnworker.import_channel_backup(encrypted)
except Exception as e:
self.show_error("failed to import backup" + '\n' + str(e))
return
def read_tx_from_qrcode(self):
def cb(success: bool, error: str, data):
if not success:
if error:
self.show_error(error)
return
if not data:
return
# if the user scanned a bitcoin URI
if data.lower().startswith(BITCOIN_BIP21_URI_SCHEME + ':'):
self.pay_to_URI(data)
return
if data.lower().startswith('channel_backup:'):
self.import_channel_backup(data)
return
# else if the user scanned an offline signed tx
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
scan_qrcode(parent=self.top_level_window(), config=self.config, callback=cb)
def read_tx_from_file(self) -> Optional[Transaction]:
fileName = getOpenFileName(
parent=self,
title=_("Select your transaction file"),
filter=TRANSACTION_FILE_EXTENSION_FILTER_ANY,
config=self.config,
)
if not fileName:
return
try:
with open(fileName, "rb") as f:
file_content = f.read() # type: Union[str, bytes]
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason),
title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(
parent=self,
title=_('Input raw transaction'),
header_layout=_("Transaction:"),
ok_label=_("Load transaction"),
config=self.config,
)
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_text_channel_backup(self):
text = text_dialog(
parent=self,
title=_('Input channel backup'),
header_layout=_("Channel Backup:"),
ok_label=_("Load backup"),
config=self.config,
)
if not text:
return
if text.startswith('channel_backup:'):
self.import_channel_backup(text)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum_grs import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
raw_tx = self._fetch_tx_from_network(txid)
if not raw_tx:
return
tx = transaction.Transaction(raw_tx)
self.show_transaction(tx)
def _fetch_tx_from_network(self, txid: str) -> Optional[str]:
if not self.network:
self.show_message(_("You are offline."))
return
try:
raw_tx = self.network.run_from_another_thread(
self.network.get_transaction(txid, timeout=10))
except UntrustedServerReturnedError as e:
self.logger.info(f"Error getting transaction from network: {repr(e)}")
self.show_message(_("Error getting transaction from network") + ":\n" + e.get_message_for_gui())
return
except Exception as e:
self.show_message(_("Error getting transaction from network") + ":\n" + repr(e))
return
return raw_tx
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It cannot be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(980, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-grs-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join(map(lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(repr(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
os.chmod(fileName, 0o600)
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), self.wallet.import_labels, on_import)
def do_export_labels(self):
export_meta_gui(self, _('labels'), self.wallet.export_labels)
def import_invoices(self):
import_meta_gui(self, _('invoices'), self.wallet.import_invoices, self.invoice_list.update)
def export_invoices(self):
export_meta_gui(self, _('invoices'), self.wallet.export_invoices)
def import_requests(self):
import_meta_gui(self, _('requests'), self.wallet.import_requests, self.request_list.update)
def export_requests(self):
export_meta_gui(self, _('requests'), self.wallet.export_requests)
def import_contacts(self):
import_meta_gui(self, _('contacts'), self.contacts.import_file, self.contact_list.update)
def export_contacts(self):
export_meta_gui(self, _('contacts'), self.contacts.export_file)
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
hbox_top = QHBoxLayout()
hbox_top.addWidget(QLabel(_("Enter private keys:")))
hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
vbox.addLayout(hbox_top)
keys_e = ScanQRTextEdit(allow_multi=True, config=self.config)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk(*, raise_on_error=False):
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text, raise_on_error=raise_on_error)
def on_edit():
valid_privkeys = False
try:
valid_privkeys = get_pk(raise_on_error=True) is not None
except Exception as e:
button.setToolTip(f'{_("Error")}: {repr(e)}')
else:
button.setToolTip('')
button.setEnabled(get_address() is not None and valid_privkeys)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_address)
on_address(str(address_e.text()))
if not d.exec_():
return
# user pressed "sweep"
addr = get_address()
try:
self.wallet.check_address_for_corruption(addr)
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
privkeys = get_pk()
def on_success(result):
coins, keypairs = result
outputs = [PartialTxOutput.from_address_and_value(addr, value='!')]
self.warn_if_watching_only()
self.pay_onchain_dialog(coins, outputs, external_keypairs=keypairs)
def on_failure(exc_info):
self.on_error(exc_info)
msg = _('Preparing sweep transaction...')
task = lambda: self.network.run_from_another_thread(
sweep_preparations(privkeys, self.network))
WaitingDialog(self, msg, task, on_success, on_failure)
def _do_import(self, title, header_layout, func):
text = text_dialog(
parent=self,
title=title,
header_layout=header_layout,
ok_label=_('Import'),
allow_multi=True,
config=self.config,
)
if not text:
return
keys = str(text).split()
good_inputs, bad_inputs = func(keys)
if good_inputs:
msg = '\n'.join(good_inputs[:10])
if len(good_inputs) > 10: msg += '\n...'
self.show_message(_("The following addresses were added")
+ f' ({len(good_inputs)}):\n' + msg)
if bad_inputs:
msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
if len(bad_inputs) > 10: msg += '\n...'
self.show_error(_("The following inputs could not be imported")
+ f' ({len(bad_inputs)}):\n' + msg)
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")+':'
self._do_import(title, msg, self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title = _('Import private keys')
header_layout = QHBoxLayout()
header_layout.addWidget(QLabel(_("Enter private keys")+':'))
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
from .settings_dialog import SettingsDialog
d = SettingsDialog(self, self.config)
self.alias_received_signal.connect(d.set_alias_color)
d.exec_()
self.alias_received_signal.disconnect(d.set_alias_color)
if self.fx:
self.fx.trigger_update()
run_hook('close_settings_dialog')
if d.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# note that closeEvent is NOT called if the user quits with Ctrl-C
self.clean_up()
event.accept()
def clean_up(self):
if self._cleaned_up:
return
self._cleaned_up = True
if self.wallet.thread:
self.wallet.thread.stop()
self.wallet.thread = None
util.unregister_callback(self.on_network)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.db.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.wallet.db.put("qt-console-history", self.console.history[-50:])
if self.qr_window:
self.qr_window.close()
self.close_wallet()
if self._update_check_thread:
self._update_check_thread.exit()
self._update_check_thread.wait()
if self.tray:
self.tray = None
self.gui_object.timer.timeout.disconnect(self.timer_actions)
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p: Optional['BasePlugin'], name: str, i: int):
widget = settings_widgets.get(name) # type: Optional[QWidget]
if widget and not p:
# plugin got disabled, rm widget
grid.removeWidget(widget)
widget.setParent(None)
settings_widgets.pop(name)
elif widget is None and p and p.requires_settings() and p.is_enabled():
# plugin got enabled, add widget
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
# note: all enabled plugins will receive this hook:
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
full_name = descr['__name__']
prefix, _separator, name = full_name.rpartition('.')
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.logger.exception(f"cannot display plugin {name}")
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def cpfp_dialog(self, parent_tx: Transaction) -> None:
new_tx = self.wallet.cpfp(parent_tx, 0)
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
parent_txid = parent_tx.txid()
assert parent_txid
parent_fee = self.wallet.get_tx_fee(parent_txid)
if parent_fee is None:
self.show_error(_("Can't CPFP: unknown fee for parent transaction."))
return
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = _(
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(msg))
msg2 = _("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(msg2))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
combined_fee = QLabel('')
combined_feerate = QLabel('')
def on_fee_edit(x):
fee_for_child = fee_e.get_amount()
if fee_for_child is None:
return
out_amt = max_fee - fee_for_child
out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else ''
output_amount.setText(out_amt_str)
comb_fee = parent_fee + fee_for_child
comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else ''
combined_fee.setText(comb_fee_str)
comb_feerate = comb_fee / total_size * 1000
comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ''
combined_feerate.setText(comb_feerate_str)
fee_e.textChanged.connect(on_fee_edit)
def get_child_fee_from_total_feerate(fee_per_kb: Optional[int]) -> Optional[int]:
if fee_per_kb is None:
return None
fee = fee_per_kb * total_size / 1000 - parent_fee
fee = round(fee)
fee = min(max_fee, fee)
fee = max(total_size, fee) # pay at least 1 sat/byte for combined size
return fee
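        # Worked example with illustrative numbers: for total_size = 500 bytes,
        # a target rate of 10000 sat per kB and parent_fee = 2000 sat, the
        # child fee is 10000 * 500 / 1000 - 2000 = 3000 sat, then capped at
        # max_fee and raised to at least total_size (about 1 sat/byte overall).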
suggested_feerate = self.config.fee_per_kb()
fee = get_child_fee_from_total_feerate(suggested_feerate)
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = get_child_fee_from_total_feerate(fee_rate)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_combo = FeeComboBox(fee_slider)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
grid.addWidget(fee_combo, 4, 2)
grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0)
grid.addWidget(combined_fee, 5, 1)
grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0)
grid.addWidget(combined_feerate, 6, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee is None:
            return  # fee left empty, treat it as "cancel"
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
try:
new_tx = self.wallet.cpfp(parent_tx, fee)
except CannotCPFP as e:
self.show_error(str(e))
return
self.show_transaction(new_tx)
def _add_info_to_tx_from_wallet_and_network(self, tx: PartialTransaction) -> bool:
"""Returns whether successful."""
# note side-effect: tx is being mutated
assert isinstance(tx, PartialTransaction)
try:
# note: this might download input utxos over network
BlockingWaitingDialog(
self,
_("Adding info to tx, from wallet and network..."),
lambda: tx.add_info_from_wallet(self.wallet, ignore_network_issues=False),
)
except NetworkException as e:
self.show_error(repr(e))
return False
return True
def bump_fee_dialog(self, tx: Transaction):
txid = tx.txid()
if not isinstance(tx, PartialTransaction):
tx = PartialTransaction.from_tx(tx)
if not self._add_info_to_tx_from_wallet_and_network(tx):
return
d = BumpFeeDialog(main_window=self, tx=tx, txid=txid)
d.run()
def dscancel_dialog(self, tx: Transaction):
txid = tx.txid()
if not isinstance(tx, PartialTransaction):
tx = PartialTransaction.from_tx(tx)
if not self._add_info_to_tx_from_wallet_and_network(tx):
return
d = DSCancelDialog(main_window=self, tx=tx, txid=txid)
d.run()
def save_transaction_into_wallet(self, tx: Transaction):
win = self.top_level_window()
try:
if not self.wallet.add_transaction(tx):
win.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
win.show_error(e)
return False
else:
self.wallet.save_db()
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
msg = (_("Transaction added to wallet history.") + '\n\n' +
_("Note: this is an offline transaction, if you want the network "
"to see it, you need to broadcast it."))
win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
return True
def show_cert_mismatch_error(self):
if self.showing_cert_mismatch_error:
return
self.showing_cert_mismatch_error = True
self.show_critical(title=_("Certificate mismatch"),
msg=_("The SSL certificate provided by the main server did not match the fingerprint passed in with the --serverfingerprint option.") + "\n\n" +
_("Electrum will now exit."))
self.showing_cert_mismatch_error = False
self.close()
|
opcua_py.py
|
# -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
"""OPC UA South Plugin using the FreeOPCUA Python OPC UA Library"""
import copy
import os
import logging
import time
import asyncio
import uuid
import json
from threading import Thread
from foglamp.common import logger
from foglamp.plugins.common import utils
import async_ingest
from opcua import Client, Node
__author__ = "David Henthorn, Rose-Hulman Institute of Technology"
__copyright__ = "Copyright (c) 2019"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
_LOGGER = logger.setup(__name__, level=logging.INFO)
c_callback = None
c_ingest_ref = None
loop = None
t = None
_CONFIG_CATEGORY_NAME = 'OPCUA_PY'
_CONFIG_CATEGORY_DESCRIPTION = 'South Plugin OPC UA in Python'
_DEFAULT_CONFIG = {
'plugin': {
'description': 'OPC UA South Plugin in Python',
'type': 'string',
'default': 'opcua_py',
'readonly': 'true'
},
'url': {
'description': 'OPC UA server connection string (opc.tcp)',
'type': 'string',
'default': 'opc.tcp://historian.local:9409/DvOPC',
'order': '1',
'displayName': 'Host'
},
'userName': {
'description': 'User name, if needed (leave blank if unused)',
'type': 'string',
'default': '',
'order': '2',
'displayName': 'User Name'
},
'password': {
'description': 'Password (leave blank if unused)',
'type': 'string',
'default': '',
'order': '3',
'displayName': 'Password'
},
'subscriptions': {
'description': 'JSON list of nodes to subscribe to',
'type': 'JSON',
'order': '4',
'displayName': 'OPC UA Nodes to monitor through subscriptions',
'default' : '{ "subscriptions" : [ "ns=2;s=0:FIT-321.CV", "ns=2;s=0:TE200-07/AI1/OUT.CV", "ns=2;s=0:TE200-12/AI1/OUT.CV" ] }'
}
}
def plugin_info():
""" Returns information about the plugin.
Args:
Returns:
dict: plugin information
Raises:
"""
return {
'name': 'opcua_py plugin',
'version': '1.7.0',
'mode': 'async',
'type': 'south',
'interface': '1.0',
'config': _DEFAULT_CONFIG
}
def plugin_init(config):
""" Initialise the plugin.
Args:
config: JSON configuration document for the South plugin configuration category
Returns:
data: JSON object to be used in future calls to the plugin
Raises:
"""
handle = copy.deepcopy(config)
return handle
def plugin_reconfigure(handle, new_config):
""" Reconfigures the plugin
Args:
handle: handle returned by the plugin initialisation call
new_config: JSON object representing the new configuration category for the category
Returns:
new_handle: new handle to be used in the future calls
"""
_LOGGER.info("opcua_py: Old config for {} \n new config {}".format(handle, new_config))
# plugin_shutdown
plugin_shutdown(handle)
# plugin_init
new_handle = plugin_init(new_config)
# plugin_start
plugin_start(new_handle)
return new_handle
def plugin_start(handle):
    global loop, t, client, subscription, nodes
_LOGGER.info("opcua_py plugin_start called")
url = handle['url']['value']
userName = handle['userName']['value']
password = handle['password']['value']
_LOGGER.info('opcua_py: Attempting to connect to %s', url)
client = Client(url=url)
if userName:
_LOGGER.info('opcua_py: Attempting to connect to OPC UA server with username and password.')
client.set_user(userName)
client.set_password(password)
else:
_LOGGER.info('opcua_py: Attempting to connect anonymously to OPC UA server.')
client.connect()
#Need to add some error checking on the connection
subs = json.loads(handle['subscriptions']['value'])
subs = subs["subscriptions"]
_LOGGER.info('opcua_py: Attempting to subscribe to %s', subs)
nodes = []
for sub in subs:
nodes.append(client.get_node(sub))
handler = SubscriptionHandler()
# We create a Client Subscription.
subscription = client.create_subscription(500, handler)
# We subscribe to data changes for our nodes (variables).
subscription.subscribe_data_change(nodes)
    # `loop` is declared at module level but never created anywhere else, so
    # build a dedicated asyncio event loop here; the thread keeps it running
    # until _plugin_stop() stops it.
    loop = asyncio.new_event_loop()
    def run():
        asyncio.set_event_loop(loop)
        loop.run_forever()
t = Thread(target=run)
t.start()
def _plugin_stop(handle):
    _LOGGER.info('opcua_py: Stopping OPCUA Python plugin.')
    global loop
    # stop() must be scheduled onto the loop's own thread to be thread-safe
    loop.call_soon_threadsafe(loop.stop)
def plugin_shutdown(handle):
""" Shuts down the plugin doing required cleanup, to be called prior to the South plugin service being shut down.
Args:
handle: handle returned by the plugin initialisation call
Returns:
plugin shutdown
"""
    global client
    global subscription
    global nodes
    # client, subscription and nodes are created in plugin_start()
subscription.unsubscribe(nodes)
subscription.delete()
client.disconnect()
_plugin_stop(handle)
_LOGGER.info('opcua_py has shut down.')
def plugin_register_ingest(handle, callback, ingest_ref):
"""Required plugin interface component to communicate to South C server
Args:
handle: handle returned by the plugin initialisation call
        callback: C opaque object required to be passed back to the C->ingest method
        ingest_ref: C opaque object required to be passed back to the C->ingest method
"""
global c_callback, c_ingest_ref
c_callback = callback
c_ingest_ref = ingest_ref
class SubscriptionHandler:
"""
The SubscriptionHandler is used to handle the data that is received for the subscription.
"""
def datachange_notification(self, node: Node, val, data):
"""
Callback for OPC UA Subscription.
This method will be called when the Client received a data change message from the Server.
"""
time_stamp = utils.local_timestamp()
asset_name = str(node)
#Trim the string to start with ns=
ns_start = asset_name.find('ns=')
if ns_start != -1:
asset_name = asset_name[ns_start:]
        # Some OPC UA servers add extra parentheses around the node id; strip them
asset_name = asset_name.replace("(","")
asset_name = asset_name.replace(")","")
#_LOGGER.info('opcua_py: datachange_notification %r %s', asset_name, val)
key = str(uuid.uuid4())
data = {
'asset': asset_name,
'timestamp': time_stamp, # metric.timestamp
'key' : key,
'readings': {"value": val}
}
async_ingest.ingest_callback(c_callback, c_ingest_ref, data)
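# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the plugin interface): the 'subscriptions'
# configuration item is a JSON document whose "subscriptions" key lists OPC UA
# node id strings; plugin_start() parses it with json.loads before subscribing.
# The helper name below is hypothetical and exists only for illustration.
def _example_parse_subscriptions(subscriptions_json):
    """Return the list of node id strings from a 'subscriptions' JSON value."""
    doc = json.loads(subscriptions_json)
    return doc["subscriptions"]
# e.g. _example_parse_subscriptions(_DEFAULT_CONFIG['subscriptions']['default'])
# returns ['ns=2;s=0:FIT-321.CV', 'ns=2;s=0:TE200-07/AI1/OUT.CV', 'ns=2;s=0:TE200-12/AI1/OUT.CV']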
|
test.py
|
import cv2
import numpy as np
import threading
def test():
    while True:
        # cv2.imread returns None when the file is missing or unreadable
        img1 = cv2.imread('captured car1.jpg')
        if img1 is None:
            continue
        print("{}".format(img1.shape))
        print("{}".format(img1))
        cv2.imshow('asd', img1)
        cv2.waitKey(1)
t1 = threading.Thread(target=test)
t1.start()
|
NormalFattree.py
|
# Copyright (C) 2016 Huang MaChi at Chongqing University
# of Posts and Telecommunications, China.
# Copyright (C) 2016 Li Cheng at Beijing University of Posts
# and Telecommunications. www.muzixing.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from mininet.net import Mininet
from mininet.node import Controller, RemoteController
from mininet.cli import CLI
from mininet.log import setLogLevel
from mininet.link import Link, Intf, TCLink
from mininet.topo import Topo
import argparse
import logging
import os
import sys
import signal
from ryu.app.experiments.readfile import readIpeers
from subprocess import Popen
from multiprocessing import Process
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, parentdir)
import random
parser = argparse.ArgumentParser(description="Parameters importation")
parser.add_argument('--k', dest='k', type=int, default=4, choices=[4, 8], help="Switch fanout number")
parser.add_argument('--duration', dest='duration', type=int, default=60, help="Duration (sec) for each iperf traffic generation")
parser.add_argument('--dir', dest='output_dir', help="Directory to store outputs")
parser.add_argument('--cpu', dest='cpu', type=float, default=1.0, help='Total CPU to allocate to hosts')
args = parser.parse_args()
class Fattree(Topo):
"""
Class of Fattree Topology.
"""
CoreSwitchList = []
AggSwitchList = []
EdgeSwitchList = []
HostList = []
def __init__(self, k, density):
self.pod = k
self.density = density
self.iCoreLayerSwitch = (k/2)**2
self.iAggLayerSwitch = k*k/2
self.iEdgeLayerSwitch = k*k/2
self.iHost = self.iEdgeLayerSwitch * density
# Init Topo
Topo.__init__(self)
def createNodes(self):
self.createCoreLayerSwitch(self.iCoreLayerSwitch)
self.createAggLayerSwitch(self.iAggLayerSwitch)
self.createEdgeLayerSwitch(self.iEdgeLayerSwitch)
self.createHost(self.iHost)
# Create Switch and Host
def _addSwitch(self, number, level, switch_list):
"""
Create switches.
"""
for i in xrange(1, number+1):
PREFIX = str(level) + "00"
if i >= 10:
PREFIX = str(level) + "0"
switch_list.append(self.addSwitch(PREFIX + str(i)))
def createCoreLayerSwitch(self, NUMBER):
self._addSwitch(NUMBER, 1, self.CoreSwitchList)
def createAggLayerSwitch(self, NUMBER):
self._addSwitch(NUMBER, 2, self.AggSwitchList)
def createEdgeLayerSwitch(self, NUMBER):
self._addSwitch(NUMBER, 3, self.EdgeSwitchList)
def createHost(self, NUMBER):
"""
Create hosts.
"""
for i in xrange(1, NUMBER+1):
if i >= 100:
PREFIX = "h"
elif i >= 10:
PREFIX = "h0"
else:
PREFIX = "h00"
self.HostList.append(self.addHost(PREFIX + str(i), cpu=1.0/NUMBER))
def createLinks(self, bw_c2a=100, bw_a2e=50, bw_e2h=25):
"""
Add network links.
"""
# Core to Agg
end = self.pod/2
for x in xrange(0, self.iAggLayerSwitch, end):
for i in xrange(0, end):
for j in xrange(0, end):
self.addLink(
self.CoreSwitchList[i*end+j],
self.AggSwitchList[x+i],
bw=bw_c2a, max_queue_size=1000,loss=random.random()) # use_htb=False
# Agg to Edge
for x in xrange(0, self.iAggLayerSwitch, end):
for i in xrange(0, end):
for j in xrange(0, end):
self.addLink(
self.AggSwitchList[x+i], self.EdgeSwitchList[x+j],
bw=bw_a2e, max_queue_size=1000,loss=random.random()) # use_htb=False
# Edge to Host
for x in xrange(0, self.iEdgeLayerSwitch):
for i in xrange(0, self.density):
self.addLink(
self.EdgeSwitchList[x],
self.HostList[self.density * x + i],
bw=bw_e2h, max_queue_size=1000,loss=random.random()) # use_htb=False
def set_ovs_protocol_13(self,):
"""
Set the OpenFlow version for switches.
"""
self._set_ovs_protocol_13(self.CoreSwitchList)
self._set_ovs_protocol_13(self.AggSwitchList)
self._set_ovs_protocol_13(self.EdgeSwitchList)
def _set_ovs_protocol_13(self, sw_list):
for sw in sw_list:
cmd = "sudo ovs-vsctl set bridge %s protocols=OpenFlow13" % sw
os.system(cmd)
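# Quick sanity check (illustrative only, derived from the formulas in
# Fattree.__init__): with k=4 and density=2, the values used in __main__,
# the topology has (4/2)**2 = 4 core switches, 4*4/2 = 8 aggregation
# switches, 4*4/2 = 8 edge switches and 8*2 = 16 hosts.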
def set_host_ip(net, topo):
hostlist = []
for k in xrange(len(topo.HostList)):
hostlist.append(net.get(topo.HostList[k]))
i = 1
j = 1
for host in hostlist:
host.setIP("10.%d.0.%d" % (i, j))
j += 1
if j == topo.density+1:
j = 1
i += 1
def create_subnetList(topo, num):
"""
Create the subnet list of the certain Pod.
"""
subnetList = []
remainder = num % (topo.pod/2)
if topo.pod == 4:
if remainder == 0:
subnetList = [num-1, num]
elif remainder == 1:
subnetList = [num, num+1]
else:
pass
elif topo.pod == 8:
if remainder == 0:
subnetList = [num-3, num-2, num-1, num]
elif remainder == 1:
subnetList = [num, num+1, num+2, num+3]
elif remainder == 2:
subnetList = [num-1, num, num+1, num+2]
elif remainder == 3:
subnetList = [num-2, num-1, num, num+1]
else:
pass
else:
pass
return subnetList
def install_proactive(net, topo):
"""
Install proactive flow entries for switches.
"""
# Edge Switch
for sw in topo.EdgeSwitchList:
num = int(sw[-2:])
# Downstream.
for i in xrange(1, topo.density + 1):
cmd = "ovs-ofctl add-flow %s -O OpenFlow13 \
'table=0,idle_timeout=0,hard_timeout=0,priority=10,ip, \
nw_dst=10.%d.0.%d,actions=output:%d'" % (sw, num, i, topo.pod / 2 + i)
os.system(cmd)
# Aggregate Switch
for sw in topo.AggSwitchList:
num = int(sw[-2:])
subnetList = create_subnetList(topo, num)
k = 1
for i in subnetList:
cmd = "ovs-ofctl add-flow %s -O OpenFlow13 \
'table=0,idle_timeout=0,hard_timeout=0,priority=10,mpls, \
mpls_label=%d, actions=pop_mpls:0x0800,output:%d'" % (sw, topo.pod / 2 + k, topo.pod / 2 + k)
os.system(cmd)
cmd = "ovs-ofctl add-flow %s -O OpenFlow13 \
'table=0,idle_timeout=0,hard_timeout=0,priority=10,mpls, \
mpls_label=%d,actions=pop_mpls:0x8847,output:%d'" % (sw, k, k)
os.system(cmd)
k += 1
# Core Switch
for sw in topo.CoreSwitchList:
j = 1
k = 1
for i in xrange(1, len(topo.EdgeSwitchList)+1):
cmd = "ovs-ofctl add-flow %s -O OpenFlow13 \
'table=0,idle_timeout=0,hard_timeout=0,priority=10,mpls, \
mpls_label=%d,actions=pop_mpls:0x8847,output:%d'" % (sw, j, j)
os.system(cmd)
k += 1
if k == topo.pod/2 + 1:
j += 1
k = 1
def iperfTest(net, topo):
"""
Start iperf test.
"""
h001, h015, h016 = net.get(
topo.HostList[0], topo.HostList[14], topo.HostList[15])
# iperf Server
h001.popen('iperf -s -u -i 1 > iperf_server_differentPod_result', shell=True)
# iperf Server
h015.popen('iperf -s -u -i 1 > iperf_server_samePod_result', shell=True)
# iperf Client
h016.cmdPrint('iperf -c ' + h001.IP() + ' -u -t 10 -i 1 -b 10m')
h016.cmdPrint('iperf -c ' + h015.IP() + ' -u -t 10 -i 1 -b 10m')
def monitor_devs_ng(fname="./txrate.txt", interval_sec=0.1):
"""
Use bwm-ng tool to collect interface transmit rate statistics.
bwm-ng Mode: rate;
interval time: 1s.
"""
cmd = "sleep 1; bwm-ng -t %s -o csv -u bits -T rate -C ',' > %s" % (interval_sec * 1000, fname)
Popen(cmd, shell=True).wait()
def traffic_generation(net,flows_peers):
"""
Generate traffics and test the performance of the network.
"""
# 1. Start iperf. (Elephant flows)
# Start the servers.
serversList = set([peer[1] for peer in flows_peers])
for server in serversList:
# filename = server[1:]
server = net.get(server)
# server.cmd("iperf -s > %s/%s &" % (args.output_dir, 'server'+filename+'.txt'))
        server.cmd("iperf -s > /dev/null &")  # Server-side iperf statistics are not needed, so discard the output.
time.sleep(3)
# Start the clients.
for src, dest in flows_peers:
time.sleep(1)
server = net.get(dest)
client = net.get(src)
# filename = src[1:]
# client.cmd("iperf -c %s -t %d > %s/%s &" % (server.IP(), args.duration, args.output_dir, 'client'+filename+'.txt'))
# client.cmd("iperf -c %s -t %d -M 1250 > %s/output_%s_%s.txt &" % (server.IP(), 60,args.output_dir,src,dest)) # Its statistics is useless, just throw away. 1990 just means a great number.
client.cmd("iperf -c %s -t %d -M 1250 > /dev/null &" % (server.IP(), 60))
# time.sleep(1)
#monitor = Process(target = monitor_devs_ng, args = ('%s/bwmng.txt' % args.output_dir, 1.0))
monitor = Process(target=monitor_devs_ng, args=('./results/bwmng.txt', 1.0))
monitor.start()
# Wait for the traffic to become stable.
time.sleep(10)
# 3. The experiment is going on.
time.sleep(args.duration + 5)
monitor.terminate()
os.system('killall iperf')
def pingTest(net):
"""
Start ping test.
"""
net.pingAll()
def removeOldFiles():
cmd="sudo rm -f NormalroutingResult/*"
os.system(cmd)
cmd="sudo rm -f SRroutingResult/*"
os.system(cmd)
cmd="sudo rm -f resultSolve/count_text.txt"
os.system(cmd)
def createTopo(pod, density, ip="192.168.16.128", port=6653, bw_c2a=1000, bw_a2e=500, bw_e2h=250):
"""
Create network topology and run the Mininet.
"""
topo = Fattree(pod, density)
topo.createNodes()
topo.createLinks(bw_c2a=bw_c2a, bw_a2e=bw_a2e, bw_e2h=bw_e2h)
# Start Mininet.
CONTROLLER_IP = ip
CONTROLLER_PORT = port
net = Mininet(topo=topo, link=TCLink, controller=None, autoSetMacs=True)
net.addController(
'controller', controller=RemoteController,
ip=CONTROLLER_IP, port=CONTROLLER_PORT)
net.start()
# Set OVS's protocol as OF13.
topo.set_ovs_protocol_13()
# Set hosts IP addresses.
set_host_ip(net, topo)
# Install proactive flow entries
install_proactive(net, topo)
iperf_peers=readIpeers()
# 2. Start the controller.
k_paths = args.k ** 2 / 4
fanout = args.k
Controller_Ryu = Popen("ryu-manager --observe-links Normalrouting.py --k_paths=%d --weight=hop --fanout=%d" % (k_paths, fanout), shell=True, preexec_fn=os.setsid)
# Wait until the controller has discovered network topology.
time.sleep(60)
# 3. Generate traffics and test the performance of the network.
traffic_generation(net, iperf_peers)
time.sleep(60)
# os.killpg(Controller_Ryu.pid, signal.SIGKILL)
# Controller_Ryu = Popen("ryu-manager --observe-links SRrouting.py --k_paths=%d --weight=hop --fanout=%d" % (k_paths, fanout),shell=True, preexec_fn=os.setsid)
# # Wait until the controller has discovered network topology.
# time.sleep(60)
# # 3. Generate traffics and test the performance of the network.
# traffic_generation(net, readIpeers(),'SRrouting')
# time.sleep(60)
# CLI(net)
# Stop the controller.
os.killpg(Controller_Ryu.pid, signal.SIGKILL)
net.stop()
if __name__ == '__main__':
setLogLevel('info')
if os.getuid() != 0:
logging.debug("You are NOT root")
elif os.getuid() == 0:
removeOldFiles()
logging.debug("remove old result files")
time.sleep(3)
createTopo(4, 2)
# createTopo(8, 4)
|
foo.py
|
# Python 3.3.3 and 2.7.6
# python foo.py
from threading import Thread
# Potentially useful thing:
# In Python a function must declare `global i` before assigning to the module-level variable i, rather than the variable being "exported" where it is defined
# (This is probably an effort to make you feel bad about typing the word "global")
i = 0
def incrementingFunction():
global i
    # Increment i 1,000,000 times (the range variable itself never needs to change)
    for _ in range(1000000):
        i += 1
def decrementingFunction():
global i
    # Decrement i 1,000,000 times
    for _ in range(1000000):
        i -= 1
def main():
    # Create both worker threads (reading the global i in print() below needs no declaration)
incrementing = Thread(target = incrementingFunction, args = (),)
decrementing = Thread(target = decrementingFunction, args = (),)
# TODO: Start both threads
incrementing.start()
decrementing.start()
incrementing.join()
decrementing.join()
print("The magic number is %d" % (i))
main()
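# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original exercise): the two threads above
# race on the shared counter, so the printed "magic number" is usually not 0.
# One standard fix is to guard the shared variable with a Lock. The names
# below (safe_counter, counter_lock, safe_main, ...) are illustrative only.
from threading import Lock
safe_counter = 0
counter_lock = Lock()
def safe_increment():
    global safe_counter
    for _ in range(1000000):
        with counter_lock:  # serialize the read-modify-write on the counter
            safe_counter += 1
def safe_decrement():
    global safe_counter
    for _ in range(1000000):
        with counter_lock:
            safe_counter -= 1
def safe_main():
    t_inc = Thread(target=safe_increment)
    t_dec = Thread(target=safe_decrement)
    t_inc.start(); t_dec.start()
    t_inc.join(); t_dec.join()
    print("With a Lock the magic number is %d" % safe_counter)
# safe_main()  # uncomment to run the lock-protected version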
|
background_caching_job.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Module to build and run background caching job.
For internal use only; no backwards-compatibility guarantees.
A background caching job is a job that captures events for all capturable
sources of a given pipeline. With Interactive Beam, one such job is started when
a pipeline run happens (which produces a main job in contrast to the background
caching job) and meets the following conditions:
#. The pipeline contains capturable sources, configured through
interactive_beam.options.capturable_sources.
#. No such background job is running.
#. No such background job has completed successfully and the cached events are
still valid (invalidated when capturable sources change in the pipeline).
Once started, the background caching job runs asynchronously until it hits some
capture limit configured in interactive_beam.options. Meanwhile, the main job
and future main jobs from the pipeline will run using the deterministic
replayable captured events until they are invalidated.
"""
# pytype: skip-file
from __future__ import absolute_import
import logging
import threading
import time
import apache_beam as beam
from apache_beam.runners.interactive import interactive_environment as ie
from apache_beam.runners.interactive.caching import streaming_cache
from apache_beam.runners.runner import PipelineState
_LOGGER = logging.getLogger(__name__)
class BackgroundCachingJob(object):
"""A simple abstraction that controls necessary components of a timed and
space limited background caching job.
A background caching job successfully completes source data capture in 2
conditions:
#. The job is finite and runs into DONE state;
#. The job is infinite but hits an interactive_beam.options configured limit
and gets cancelled into CANCELLED/CANCELLING state.
In both situations, the background caching job should be treated as done
successfully.
"""
def __init__(self, pipeline_result, limiters):
self._pipeline_result = pipeline_result
self._condition_checker = threading.Thread(
target=self._background_caching_job_condition_checker, daemon=True)
# Limiters are checks s.t. if any are triggered then the background caching
# job gets cancelled.
self._limiters = limiters
self._condition_checker.start()
def _background_caching_job_condition_checker(self):
while not PipelineState.is_terminal(self._pipeline_result.state):
if self._should_end_condition_checker():
self.cancel()
break
time.sleep(0.5)
def _should_end_condition_checker(self):
return any([l.is_triggered() for l in self._limiters])
def is_done(self):
is_terminated = self._pipeline_result.state is PipelineState.DONE
is_triggered = self._should_end_condition_checker()
is_cancelling = (
self._pipeline_result.state in (
PipelineState.CANCELLED, PipelineState.CANCELLING))
return is_terminated or (is_triggered and is_cancelling)
def is_running(self):
return self._pipeline_result.state is PipelineState.RUNNING
def cancel(self):
"""Cancels this background caching job.
"""
if not PipelineState.is_terminal(self._pipeline_result.state):
try:
self._pipeline_result.cancel()
except NotImplementedError:
# Ignore the cancel invocation if it is never implemented by the runner.
pass
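# Illustrative sketch (not part of this module): as used by BackgroundCachingJob
# above, a "limiter" only needs an is_triggered() method, so a minimal
# wall-clock limiter could look like the following (names are hypothetical):
#
#   class ExampleDurationLimiter(object):
#     def __init__(self, max_seconds):
#       self._deadline = time.time() + max_seconds
#
#     def is_triggered(self):
#       return time.time() >= self._deadline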
def attempt_to_run_background_caching_job(runner, user_pipeline, options=None):
"""Attempts to run a background caching job for a user-defined pipeline.
The pipeline result is automatically tracked by Interactive Beam in case
future cancellation/cleanup is needed.
"""
if is_background_caching_job_needed(user_pipeline):
# Cancel non-terminal jobs if there is any before starting a new one.
attempt_to_cancel_background_caching_job(user_pipeline)
# Cancel the gRPC server serving the test stream if there is one.
attempt_to_stop_test_stream_service(user_pipeline)
# TODO(BEAM-8335): refactor background caching job logic from
# pipeline_instrument module to this module and aggregate tests.
from apache_beam.runners.interactive import pipeline_instrument as instr
runner_pipeline = beam.pipeline.Pipeline.from_runner_api(
user_pipeline.to_runner_api(use_fake_coders=True), runner, options)
background_caching_job_result = beam.pipeline.Pipeline.from_runner_api(
instr.build_pipeline_instrument(
runner_pipeline).background_caching_pipeline_proto(),
runner,
options).run()
limiters = ie.current_env().options.capture_control.limiters()
ie.current_env().set_background_caching_job(
user_pipeline,
BackgroundCachingJob(background_caching_job_result, limiters=limiters))
def is_background_caching_job_needed(user_pipeline):
"""Determines if a background caching job needs to be started.
  It does several state checks and records state changes throughout the process.
  To keep the usage simple, it is not idempotent.
"""
job = ie.current_env().get_background_caching_job(user_pipeline)
# Checks if the pipeline contains any source that needs to be cached.
need_cache = has_source_to_cache(user_pipeline)
# If this is True, we can invalidate a previous done/running job if there is
# one.
cache_changed = is_source_to_cache_changed(user_pipeline)
# When capture replay is disabled, cache is always needed for capturable
# sources (if any).
if need_cache and not ie.current_env().options.enable_capture_replay:
from apache_beam.runners.interactive.options import capture_control
capture_control.evict_captured_data()
return True
return (
need_cache and
# Checks if it's the first time running a job from the pipeline.
(
not job or
# Or checks if there is no previous job.
# DONE means a previous job has completed successfully and the
# cached events might still be valid.
not (
job.is_done() or
# RUNNING means a previous job has been started and is still
# running.
job.is_running()) or
# Or checks if we can invalidate the previous job.
cache_changed))
def is_cache_complete(pipeline_id):
  # type: (str) -> bool
  """Returns True if the background cache for the given pipeline is done.
"""
user_pipeline = ie.current_env().pipeline_id_to_pipeline(pipeline_id)
job = ie.current_env().get_background_caching_job(user_pipeline)
is_done = job and job.is_done()
cache_changed = is_source_to_cache_changed(
user_pipeline, update_cached_source_signature=False)
return is_done and not cache_changed
def has_source_to_cache(user_pipeline):
"""Determines if a user-defined pipeline contains any source that need to be
cached. If so, also immediately wrap current cache manager held by current
interactive environment into a streaming cache if this has not been done.
The wrapping doesn't invalidate existing cache in any way.
  This helps determine whether a background caching job is needed to write the
  cache for sources and whether a test stream service is needed to serve the cache.
Throughout the check, if source-to-cache has changed from the last check, it
also cleans up the invalidated cache early on.
"""
from apache_beam.runners.interactive import pipeline_instrument as instr
# TODO(BEAM-8335): we temporarily only cache replaceable unbounded sources.
# Add logic for other cacheable sources here when they are available.
has_cache = instr.has_unbounded_sources(user_pipeline)
if has_cache:
if not isinstance(ie.current_env().get_cache_manager(user_pipeline,
create_if_absent=True),
streaming_cache.StreamingCache):
ie.current_env().set_cache_manager(
streaming_cache.StreamingCache(
ie.current_env().get_cache_manager(user_pipeline)._cache_dir,
is_cache_complete=is_cache_complete,
sample_resolution_sec=1.0),
user_pipeline)
return has_cache
def attempt_to_cancel_background_caching_job(user_pipeline):
"""Attempts to cancel background caching job for a user-defined pipeline.
If no background caching job needs to be cancelled, NOOP. Otherwise, cancel
such job.
"""
job = ie.current_env().get_background_caching_job(user_pipeline)
if job:
job.cancel()
def attempt_to_stop_test_stream_service(user_pipeline):
"""Attempts to stop the gRPC server/service serving the test stream.
If there is no such server started, NOOP. Otherwise, stop it.
"""
if is_a_test_stream_service_running(user_pipeline):
ie.current_env().evict_test_stream_service_controller(user_pipeline).stop()
def is_a_test_stream_service_running(user_pipeline):
"""Checks to see if there is a gPRC server/service running that serves the
test stream to any job started from the given user_pipeline.
"""
return ie.current_env().get_test_stream_service_controller(
user_pipeline) is not None
def is_source_to_cache_changed(
user_pipeline, update_cached_source_signature=True):
"""Determines if there is any change in the sources that need to be cached
used by the user-defined pipeline.
Due to the expensiveness of computations and for the simplicity of usage, this
function is not idempotent because Interactive Beam automatically discards
previously tracked signature of transforms and tracks the current signature of
transforms for the user-defined pipeline if there is any change.
When it's True, there is addition/deletion/mutation of source transforms that
requires a new background caching job.
"""
# By default gets empty set if the user_pipeline is first time seen because
# we can treat it as adding transforms.
recorded_signature = ie.current_env().get_cached_source_signature(
user_pipeline)
current_signature = extract_source_to_cache_signature(user_pipeline)
is_changed = not current_signature.issubset(recorded_signature)
# The computation of extract_unbounded_source_signature is expensive, track on
# change by default.
if is_changed and update_cached_source_signature:
options = ie.current_env().options
# No info needed when capture replay is disabled.
if options.enable_capture_replay:
if not recorded_signature:
def sizeof_fmt(num, suffix='B'):
for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 1000.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1000.0
return "%.1f%s%s" % (num, 'Yi', suffix)
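        # e.g. sizeof_fmt(1234567) returns '1.2MB' (decimal, 1000-based units)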
_LOGGER.info(
'Interactive Beam has detected unbounded sources in your pipeline. '
'In order to have a deterministic replay, a segment of data will '
'be recorded from all sources for %s seconds or until a total of '
'%s have been written to disk.',
options.capture_duration.total_seconds(),
sizeof_fmt(options.capture_size_limit))
else:
_LOGGER.info(
'Interactive Beam has detected a new streaming source was '
'added to the pipeline. In order for the cached streaming '
'data to start at the same time, all captured data has been '
'cleared and a new segment of data will be recorded.')
ie.current_env().cleanup(user_pipeline)
ie.current_env().set_cached_source_signature(
user_pipeline, current_signature)
return is_changed
def extract_source_to_cache_signature(user_pipeline):
"""Extracts a set of signature for sources that need to be cached in the
user-defined pipeline.
A signature is a str representation of urn and payload of a source.
"""
from apache_beam.runners.interactive import pipeline_instrument as instr
# TODO(BEAM-8335): we temporarily only cache replaceable unbounded sources.
# Add logic for other cacheable sources here when they are available.
unbounded_sources_as_applied_transforms = instr.unbounded_sources(
user_pipeline)
unbounded_sources_as_ptransforms = set(
map(lambda x: x.transform, unbounded_sources_as_applied_transforms))
_, context = user_pipeline.to_runner_api(
return_context=True, use_fake_coders=True)
signature = set(
map(
lambda transform: str(transform.to_runner_api(context)),
unbounded_sources_as_ptransforms))
return signature
|
wsdump.py
|
#!/Users/Reiki/.pyenv/versions/3.6.1/bin/python
import argparse
import code
import sys
import threading
import time
import six
from six.moves.urllib.parse import urlparse
import websocket
try:
import readline
except ImportError:
pass
def get_encoding():
encoding = getattr(sys.stdin, "encoding", "")
if not encoding:
return "utf-8"
else:
return encoding.lower()
OPCODE_DATA = (websocket.ABNF.OPCODE_TEXT, websocket.ABNF.OPCODE_BINARY)
ENCODING = get_encoding()
class VAction(argparse.Action):
def __call__(self, parser, args, values, option_string=None):
if values is None:
values = "1"
try:
values = int(values)
except ValueError:
values = values.count("v") + 1
setattr(args, self.dest, values)
def parse_args():
parser = argparse.ArgumentParser(description="WebSocket Simple Dump Tool")
parser.add_argument("url", metavar="ws_url",
help="websocket url. ex. ws://echo.websocket.org/")
parser.add_argument("-p", "--proxy",
help="proxy url. ex. http://127.0.0.1:8080")
parser.add_argument("-v", "--verbose", default=0, nargs='?', action=VAction,
dest="verbose",
help="set verbose mode. If set to 1, show opcode. "
"If set to 2, enable to trace websocket module")
parser.add_argument("-n", "--nocert", action='store_true',
help="Ignore invalid SSL cert")
parser.add_argument("-r", "--raw", action="store_true",
help="raw output")
parser.add_argument("-s", "--subprotocols", nargs='*',
help="Set subprotocols")
parser.add_argument("-o", "--origin",
help="Set origin")
parser.add_argument("--eof-wait", default=0, type=int,
help="wait time(second) after 'EOF' received.")
parser.add_argument("-t", "--text",
help="Send initial text")
parser.add_argument("--timings", action="store_true",
help="Print timings in seconds")
parser.add_argument("--headers",
help="Set custom headers. Use ',' as separator")
return parser.parse_args()
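# Example invocations (illustrative; the flags are defined in parse_args above):
#   wsdump.py ws://echo.websocket.org/ -t "hello" --timings
#   wsdump.py ws://127.0.0.1:8080/ws -v -r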
class RawInput:
def raw_input(self, prompt):
if six.PY3:
line = input(prompt)
else:
line = raw_input(prompt)
if ENCODING and ENCODING != "utf-8" and not isinstance(line, six.text_type):
line = line.decode(ENCODING).encode("utf-8")
elif isinstance(line, six.text_type):
line = line.encode("utf-8")
return line
class InteractiveConsole(RawInput, code.InteractiveConsole):
def write(self, data):
sys.stdout.write("\033[2K\033[E")
# sys.stdout.write("\n")
sys.stdout.write("\033[34m< " + data + "\033[39m")
sys.stdout.write("\n> ")
sys.stdout.flush()
def read(self):
return self.raw_input("> ")
class NonInteractive(RawInput):
def write(self, data):
sys.stdout.write(data)
sys.stdout.write("\n")
sys.stdout.flush()
def read(self):
return self.raw_input("")
def main():
start_time = time.time()
args = parse_args()
if args.verbose > 1:
websocket.enableTrace(True)
options = {}
if args.proxy:
p = urlparse(args.proxy)
options["http_proxy_host"] = p.hostname
options["http_proxy_port"] = p.port
if args.origin:
options["origin"] = args.origin
if args.subprotocols:
options["subprotocols"] = args.subprotocols
opts = {}
if args.nocert:
opts = {"cert_reqs": websocket.ssl.CERT_NONE, "check_hostname": False}
if args.headers:
options['header'] = map(str.strip, args.headers.split(','))
ws = websocket.create_connection(args.url, sslopt=opts, **options)
if args.raw:
console = NonInteractive()
else:
console = InteractiveConsole()
print("Press Ctrl+C to quit")
def recv():
try:
frame = ws.recv_frame()
except websocket.WebSocketException:
return websocket.ABNF.OPCODE_CLOSE, None
if not frame:
raise websocket.WebSocketException("Not a valid frame %s" % frame)
elif frame.opcode in OPCODE_DATA:
return frame.opcode, frame.data
elif frame.opcode == websocket.ABNF.OPCODE_CLOSE:
ws.send_close()
return frame.opcode, None
elif frame.opcode == websocket.ABNF.OPCODE_PING:
ws.pong(frame.data)
return frame.opcode, frame.data
return frame.opcode, frame.data
def recv_ws():
while True:
opcode, data = recv()
msg = None
if six.PY3 and opcode == websocket.ABNF.OPCODE_TEXT and isinstance(data, bytes):
data = str(data, "utf-8")
if not args.verbose and opcode in OPCODE_DATA:
msg = data
elif args.verbose:
msg = "%s: %s" % (websocket.ABNF.OPCODE_MAP.get(opcode), data)
if msg is not None:
if args.timings:
console.write(str(time.time() - start_time) + ": " + msg)
else:
console.write(msg)
if opcode == websocket.ABNF.OPCODE_CLOSE:
break
thread = threading.Thread(target=recv_ws)
thread.daemon = True
thread.start()
if args.text:
ws.send(args.text)
while True:
try:
message = console.read()
ws.send(message)
except KeyboardInterrupt:
return
except EOFError:
time.sleep(args.eof_wait)
return
if __name__ == "__main__":
try:
main()
except Exception as e:
print(e)
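# A minimal, uncalled sketch of driving the same websocket-client calls that main()
# wires together, without the interactive console. The echo URL is the one quoted in
# the --help text above and is only illustrative; any reachable ws:// endpoint works.
def _example_programmatic_use(url="ws://echo.websocket.org/"):
    ws = websocket.create_connection(url)
    try:
        ws.send("hello")       # one text frame, like the --text option in main()
        print(ws.recv())       # block for a single reply frame
    finally:
        ws.close()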
|
Network.py
|
import argparse
import socket
import threading
from time import sleep
import random
import RDT
## Provides an abstraction for the network layer
class NetworkLayer:
#configuration parameters
prob_pkt_loss = 0
prob_byte_corr = 0
prob_pkt_reorder = 0
#class variables
sock = None
conn = None
buffer_S = ''
lock = threading.Lock()
collect_thread = None
stop = None
socket_timeout = 0.1
reorder_msg_S = None
def __init__(self, role_S, server_S, port):
if role_S == 'client':
print('Network: role is client')
self.conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.conn.connect((server_S, port))
self.conn.settimeout(self.socket_timeout)
elif role_S == 'server':
print('Network: role is server')
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.bind(('localhost', port))
self.sock.listen(1)
self.conn, addr = self.sock.accept()
self.conn.settimeout(self.socket_timeout)
#start the thread to receive data on the connection
self.collect_thread = threading.Thread(name='Collector', target=self.collect)
self.stop = False
self.collect_thread.start()
def disconnect(self):
if self.collect_thread:
self.stop = True
self.collect_thread.join()
def __del__(self):
if self.sock is not None: self.sock.close()
if self.conn is not None: self.conn.close()
def udt_send(self, msg_S):
#return without sending if the packet is being dropped
if random.random() < self.prob_pkt_loss:
return
#corrupt a packet
if random.random() < self.prob_byte_corr:
start = random.randint(RDT.Packet.length_S_length,len(msg_S)-5) #make sure we are not corrupting the length field,
#since that makes life really difficult
num = random.randint(1,5)
repl_S = ''.join(random.sample('XXXXX', num)) #sample length >= num
msg_S = msg_S[:start]+repl_S+msg_S[start+num:]
#reorder packets - either hold a packet back, or if one held back then send both
if random.random() < self.prob_pkt_reorder or self.reorder_msg_S:
if self.reorder_msg_S is None:
self.reorder_msg_S = msg_S
return None
else:
msg_S += self.reorder_msg_S
self.reorder_msg_S = None
#keep calling send until all the bytes are transferred
totalsent = 0
while totalsent < len(msg_S):
sent = self.conn.send(msg_S[totalsent:].encode('utf-8'))
if sent == 0:
raise RuntimeError("socket connection broken")
totalsent = totalsent + sent
## Receive data from the network and save in internal buffer
def collect(self):
# print (threading.currentThread().getName() + ': Starting')
while(True):
try:
recv_bytes = self.conn.recv(2048)
with self.lock:
self.buffer_S += recv_bytes.decode('utf-8')
# you may need to uncomment the BlockingIOError handling on Windows machines
# except BlockingIOError as err:
# pass
except socket.timeout as err:
pass
if self.stop:
# print (threading.currentThread().getName() + ': Ending')
return
## Deliver collected data to client
def udt_receive(self):
with self.lock:
ret_S = self.buffer_S
self.buffer_S = ''
return ret_S
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Network layer implementation.')
parser.add_argument('role', help='Role is either client or server.', choices=['client', 'server'])
parser.add_argument('server', help='Server.')
parser.add_argument('port', help='Port.', type=int)
args = parser.parse_args()
network = NetworkLayer(args.role, args.server, args.port)
if args.role == 'client':
network.udt_send('MSG_FROM_CLIENT')
sleep(6)
print(network.udt_receive())
network.disconnect()
else:
sleep(1)
print(network.udt_receive())
network.udt_send('MSG_FROM_SERVER')
network.disconnect()
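# A small, uncalled sketch of tuning the unreliability knobs defined at the top of
# NetworkLayer before exercising a sender. The host, port, and probabilities are
# illustrative assumptions, and a matching 'server' role must already be listening
# for the connect() in __init__ to succeed.
def _example_lossy_client(server_S='localhost', port=5000):
    net = NetworkLayer('client', server_S, port)
    net.prob_pkt_loss = 0.1     # silently drop ~10% of udt_send() calls
    net.prob_pkt_reorder = 0.1  # occasionally hold one packet back and swap order
    net.prob_byte_corr = 0.1    # corrupt a few bytes (message must be longer than the length field)
    net.udt_send('A_SUFFICIENTLY_LONG_TEST_MESSAGE')
    sleep(1)
    print(net.udt_receive())
    net.disconnect()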
|
TFLite_detection_webcam.bak.py
|
######## Webcam Object Detection Using Tensorflow-trained Classifier #########
#
# Author: Evan Juras
# Date: 10/27/19
# Description:
# This program uses a TensorFlow Lite model to perform object detection on a live webcam
# feed. It draws boxes and scores around the objects of interest in each frame from the
# webcam. To improve FPS, the webcam object runs in a separate thread from the main program.
# This script will work with either a Picamera or regular USB webcam.
#
# This code is based off the TensorFlow Lite image classification example at:
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/examples/python/label_image.py
#
# I added my own method of drawing boxes and labels using OpenCV.
# Import packages
import os
import argparse
import cv2
import numpy as np
import sys
import time
from threading import Thread
import importlib.util
# Define VideoStream class to handle streaming of video from webcam in separate processing thread
# Source - Adrian Rosebrock, PyImageSearch: https://www.pyimagesearch.com/2015/12/28/increasing-raspberry-pi-fps-with-python-and-opencv/
class VideoStream:
"""Camera object that controls video streaming from the Picamera"""
def __init__(self,resolution=(640,480),framerate=30):
# Initialize the PiCamera and the camera image stream
self.stream = cv2.VideoCapture(0)
ret = self.stream.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG'))
ret = self.stream.set(3,resolution[0])
ret = self.stream.set(4,resolution[1])
# Read first frame from the stream
(self.grabbed, self.frame) = self.stream.read()
# Variable to control when the camera is stopped
self.stopped = False
def start(self):
# Start the thread that reads frames from the video stream
Thread(target=self.update,args=()).start()
return self
def update(self):
# Keep looping indefinitely until the thread is stopped
while True:
# If the camera is stopped, stop the thread
if self.stopped:
# Close camera resources
self.stream.release()
return
# Otherwise, grab the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
def read(self):
# Return the most recent frame
return self.frame
def stop(self):
# Indicate that the camera and thread should be stopped
self.stopped = True
# Define and parse input arguments
parser = argparse.ArgumentParser()
parser.add_argument('--modeldir', help='Folder the .tflite file is located in',
required=True)
parser.add_argument('--graph', help='Name of the .tflite file, if different than detect.tflite',
default='detect.tflite')
parser.add_argument('--labels', help='Name of the labelmap file, if different than labelmap.txt',
default='labelmap.txt')
parser.add_argument('--threshold', help='Minimum confidence threshold for displaying detected objects',
default=0.5)
parser.add_argument('--resolution', help='Desired webcam resolution in WxH. If the webcam does not support the resolution entered, errors may occur.',
default='1280x720')
parser.add_argument('--edgetpu', help='Use Coral Edge TPU Accelerator to speed up detection',
action='store_true')
args = parser.parse_args()
MODEL_NAME = args.modeldir
GRAPH_NAME = args.graph
LABELMAP_NAME = args.labels
min_conf_threshold = float(args.threshold)
resW, resH = args.resolution.split('x')
imW, imH = int(resW), int(resH)
use_TPU = args.edgetpu
# Import TensorFlow libraries
# If tflite_runtime is installed, import interpreter from tflite_runtime, else import from regular tensorflow
# If using Coral Edge TPU, import the load_delegate library
pkg = importlib.util.find_spec('tflite_runtime')
if pkg:
from tflite_runtime.interpreter import Interpreter
if use_TPU:
from tflite_runtime.interpreter import load_delegate
else:
from tensorflow.lite.python.interpreter import Interpreter
if use_TPU:
from tensorflow.lite.python.interpreter import load_delegate
# If using Edge TPU, assign filename for Edge TPU model
if use_TPU:
# If user has specified the name of the .tflite file, use that name, otherwise use default 'edgetpu.tflite'
if (GRAPH_NAME == 'detect.tflite'):
GRAPH_NAME = 'edgetpu.tflite'
# Get path to current working directory
CWD_PATH = os.getcwd()
# Path to .tflite file, which contains the model that is used for object detection
PATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,GRAPH_NAME)
# Path to label map file
PATH_TO_LABELS = os.path.join(CWD_PATH,MODEL_NAME,LABELMAP_NAME)
# Load the label map
with open(PATH_TO_LABELS, 'r') as f:
labels = [line.strip() for line in f.readlines()]
# Have to do a weird fix for label map if using the COCO "starter model" from
# https://www.tensorflow.org/lite/models/object_detection/overview
# First label is '???', which has to be removed.
if labels[0] == '???':
del(labels[0])
# Load the Tensorflow Lite model.
# If using Edge TPU, use special load_delegate argument
if use_TPU:
interpreter = Interpreter(model_path=PATH_TO_CKPT,
experimental_delegates=[load_delegate('libedgetpu.so.1.0')])
print(PATH_TO_CKPT)
else:
interpreter = Interpreter(model_path=PATH_TO_CKPT)
interpreter.allocate_tensors()
# Get model details
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
height = input_details[0]['shape'][1]
width = input_details[0]['shape'][2]
floating_model = (input_details[0]['dtype'] == np.float32)
input_mean = 127.5
input_std = 127.5
# Initialize frame rate calculation
frame_rate_calc = 1
freq = cv2.getTickFrequency()
# Initialize video stream
videostream = VideoStream(resolution=(imW,imH),framerate=30).start()
time.sleep(1)
#for frame1 in camera.capture_continuous(rawCapture, format="bgr",use_video_port=True):
while True:
# Start timer (for calculating frame rate)
t1 = cv2.getTickCount()
# Grab frame from video stream
frame1 = videostream.read()
# Acquire frame and resize to expected shape [1xHxWx3]
frame = frame1.copy()
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame_resized = cv2.resize(frame_rgb, (width, height))
input_data = np.expand_dims(frame_resized, axis=0)
# Normalize pixel values if using a floating model (i.e. if model is non-quantized)
if floating_model:
input_data = (np.float32(input_data) - input_mean) / input_std
# Perform the actual detection by running the model with the image as input
interpreter.set_tensor(input_details[0]['index'],input_data)
interpreter.invoke()
# Retrieve detection results
boxes = interpreter.get_tensor(output_details[0]['index'])[0] # Bounding box coordinates of detected objects
classes = interpreter.get_tensor(output_details[1]['index'])[0] # Class index of detected objects
scores = interpreter.get_tensor(output_details[2]['index'])[0] # Confidence of detected objects
#num = interpreter.get_tensor(output_details[3]['index'])[0] # Total number of detected objects (inaccurate and not needed)
# Loop over all detections and draw detection box if confidence is above minimum threshold
for i in range(len(scores)):
if ((scores[i] > min_conf_threshold) and (scores[i] <= 1.0)):
# Get bounding box coordinates and draw box
# Interpreter can return coordinates that are outside of image dimensions, need to force them to be within image using max() and min()
ymin = int(max(1,(boxes[i][0] * imH)))
xmin = int(max(1,(boxes[i][1] * imW)))
ymax = int(min(imH,(boxes[i][2] * imH)))
xmax = int(min(imW,(boxes[i][3] * imW)))
cv2.rectangle(frame, (xmin,ymin), (xmax,ymax), (10, 255, 0), 2)
# Draw label
object_name = labels[int(classes[i])] # Look up object name from "labels" array using class index
label = '%s: %d%%' % (object_name, int(scores[i]*100)) # Example: 'person: 72%'
labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2) # Get font size
label_ymin = max(ymin, labelSize[1] + 10) # Make sure not to draw label too close to top of window
cv2.rectangle(frame, (xmin, label_ymin-labelSize[1]-10), (xmin+labelSize[0], label_ymin+baseLine-10), (255, 255, 255), cv2.FILLED) # Draw white box to put label text in
cv2.putText(frame, label, (xmin, label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2) # Draw label text
# Draw framerate in corner of frame
cv2.putText(frame,'FPS: {0:.2f}'.format(frame_rate_calc),(30,50),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,0),2,cv2.LINE_AA)
# All the results have been drawn on the frame, so it's time to display it.
cv2.imshow('Object detector', frame)
# Calculate framerate
t2 = cv2.getTickCount()
time1 = (t2-t1)/freq
frame_rate_calc= 1/time1
# Press 'q' to quit
if cv2.waitKey(1) == ord('q'):
break
# Clean up
cv2.destroyAllWindows()
videostream.stop()
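# A short, uncalled sketch showing the threaded VideoStream class reused on its own,
# independent of the TFLite detection pipeline above. The resolution and frame count
# are illustrative assumptions.
def _example_videostream_only(num_frames=100):
    stream = VideoStream(resolution=(640, 480), framerate=30).start()
    time.sleep(1)  # give the capture thread a moment to grab its first frame
    for _ in range(num_frames):
        frame = stream.read()   # read() always returns the most recent frame
        cv2.imshow('VideoStream only', frame)
        if cv2.waitKey(1) == ord('q'):
            break
    stream.stop()
    cv2.destroyAllWindows()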
|
bot.py
|
""" Simple notify-by-schedule telegram bot
- coding: utf-8 -
Author: shchuko
Email: yaroshchuk2000@gmail.com
Website: github.com/shchuko
"""
import time
import schedule
import datetime
import random
import os
import threading
from datetime import timedelta
from datetime import date
from calendar import monthrange
import telebot
from telebot.apihelper import ApiTelegramException
import botmessages as bmsg
print('I\'m alive!')
token = os.environ['TOKEN']
chat_id = os.environ['CHAT_ID']
print('Env vars reading successful')
fixed_date = datetime.datetime(2019, 4, 10)
bot = telebot.TeleBot(token)
class CleaningReminder:
def __init__(self, token, chat_id, start_date):
self.bot = telebot.TeleBot(token)
self.chat_id = chat_id
self.start_date = start_date
self.room_list_1 = ['1001', '1002', '1003', '1004', '1005', '1006', '1007', '1008', '1009', '1010', '1011']
self.room_list_2 = ['1012', '1013', '1014', '1015', '1016', '1017', '1018', '1019', '1020', '1021', '1022',
'1023']
def add_remind_time(self, remind_time, remind_day_of_month):
# 21:01 UTC 31.12 == 00:01 MOW 01.01
schedule.every().day.at('21:01').do(
lambda: self.bot.send_message(chat_id,
"Happy New Year!") if date.today().day == 31 and date.today().month == 12 else lambda: None)
schedule.every().day.at(remind_time).do(
lambda: self.__clean_reminder() if date.today().day == remind_day_of_month else lambda: None)
def polling(self):
thread = threading.Thread(target=self.__polling_loop)
thread.start()
def __clean_reminder(self):
message = bmsg.clean_headers[random.randint(0, len(bmsg.clean_headers) - 1)] + '\n'
day_date = datetime.datetime.today()
days_in_month = monthrange(day_date.year, day_date.month)[1]
for i in range(days_in_month):
room_first = self.room_list_1[(day_date - self.start_date).days % len(self.room_list_1)]
room_second = self.room_list_2[(day_date - self.start_date).days % len(self.room_list_2)]
message += bmsg.clean_body.format(day_date.strftime("%d.%m"), room_first, room_second)
day_date += timedelta(days=1)
message += bmsg.clean_hashtag
message_info = self.bot.send_message(chat_id, message)
try:
self.bot.unpin_all_chat_messages(chat_id)
except ApiTelegramException:
pass # Sometimes telegram responds with 429 for some reason
self.bot.pin_chat_message(chat_id, message_info.message_id)
def __polling_loop(self):
while True:
schedule.run_pending()
time.sleep(1)
@bot.message_handler(commands=['start'])
def handle_start(message):
bot.reply_to(message, bmsg.start + bmsg.hlp)
@bot.message_handler(commands=['help'])
def handle_help(message):
bot.reply_to(message, bmsg.hlp)
@bot.message_handler(commands=['links'])
def handle_links(message):
bot.reply_to(message, bmsg.links)
@bot.message_handler(commands=['faq_ru'])
def handle_faq_ru(message):
bot.reply_to(message, bmsg.faq_ru)
@bot.message_handler(commands=['faq_en'])
def handle_faq_en(message):
bot.reply_to(message, bmsg.faq_en)
@bot.message_handler(commands=['chat_id'])
def handle_chat_id(message):
bot.reply_to(message, f"Current chat id: {message.chat.id}")
@bot.message_handler(commands=['who_clean'])
def handle_who_clean(message):
day_date = datetime.datetime.today()
room_list_1 = ['1001', '1002', '1003', '1004', '1005', '1006', '1007', '1008', '1009', '1010', '1011']
room_list_2 = ['1012', '1013', '1014', '1015', '1016', '1017', '1018', '1019', '1020', '1021', '1022', '1023']
room_first = room_list_1[(day_date - fixed_date).days % len(room_list_1)]
room_second = room_list_2[(day_date - fixed_date).days % len(room_list_2)]
bot.reply_to(message, bmsg.clean_body.format(day_date.strftime("%d/%m/%Y"), room_first, room_second))
@bot.message_handler(content_types=["new_chat_members"])
def handle_joinchat(message):
bot.reply_to(message, bmsg.hlp)
if __name__ == '__main__':
reminder = CleaningReminder(token, chat_id, fixed_date)
reminder.add_remind_time('13:00', 1)
reminder.polling()
bot.polling()
|
bprofile.py
|
#####################################################################
# #
# profile.py #
# #
# Copyright 2014, Chris Billington #
# #
# This file is part of the bprofile project (see #
# https://bitbucket.org/cbillington/bprofile) and is licensed under #
# the Simplified BSD License. See the LICENSE.txt file in the root #
# of the project for the full license. #
# #
#####################################################################
import sys
import os
import subprocess
import pstats
import threading
import time
import atexit
import weakref
import uuid
import tempfile
import functools
import cProfile
this_folder = os.path.dirname(os.path.realpath(__file__))
gprof2dot = os.path.join(this_folder, 'gprof2dot.py')
# Startupinfo, for ensuring subprocesses don't launch with a visible cmd.exe
# window on Windows:
if os.name == 'nt':
startupinfo = subprocess.STARTUPINFO()
try:
STARTF_USESHOWWINDOW = subprocess.STARTF_USESHOWWINDOW
except AttributeError:
# Above is absent in some versions of Python, but for them it is here:
STARTF_USESHOWWINDOW = subprocess._subprocess.STARTF_USESHOWWINDOW
startupinfo.dwFlags |= STARTF_USESHOWWINDOW
else:
startupinfo = None
def find_dot():
if os.name == 'nt':
folders = []
try:
program_files = os.environ["ProgramFiles"]
folders.append(program_files)
except KeyError:
pass
try:
program_files_x86 = os.environ["ProgramFiles(x86)"]
folders.append(program_files_x86)
except KeyError:
pass
for folder in folders:
for subfolder in os.listdir(folder):
if 'graphviz' in subfolder.lower():
dot = os.path.join(folder, subfolder, 'bin', 'dot.exe')
if os.path.exists(dot):
return dot
else:
raise OSError('dot.exe not found, please install graphviz')
else:
with open(os.devnull, 'w') as devnull:
if subprocess.call('type dot', shell=True, stdout=devnull, stderr=devnull):
raise OSError('\'dot\' not found, please install graphviz')
return 'dot'
DOT_PATH = find_dot()
class BProfile(object):
"""A profiling context manager.
A context manager that, after it exits, outputs a .png file of a graph made
via cProfile, gprof2dot and graphviz. The context manager can be used
multiple times, and if used repeatedly, regularly updates its output to
include cumulative results.
An instance can also be used as a decorator, it will simply wrap calls to
the decorated method in the profiling context.
Parameters
----------
output_path: str
The name of the .png report file you would like to output. '.png' will
be appended if not present.
threshold_percent: int or float
Nodes in which execution spends less than this percentage of the total
profiled execution time will not be included in the output.
report_interval: int or float
The minimum time, in seconds, in between output file generation. If
the context manager exits and it has not been at least this long since
the last output was generated, output generation will be delayed until
it has been. More profiling can run in the meantime. This is to
decrease overhead on your program, (even though this overhead will
only be incurred when no code is being profiled), while allowing you
to have ongoing results of the profiling while your code is still
running. If you only use the context manager once, then this argument
has no effect. If you set it to zero, output will be produced after
every exit of the context.
enabled: bool
Whether the profiler is enabled or not. Equivalent to calling
:func:`~bprofile.BProfile.set_enabled` with this argument after
instantiation. Useful for enabling and disabling profiling with
a global flag when you do not have easy access to the instance
- for example when using as a decorator.
Notes
-----
The profiler will return immediately after the context manager, and will
generate its .png report in a separate thread. If the same context manager
is used multiple times output will be generated at most every
``report_interval`` seconds (default: 5). The delay is to allow blocks to
execute many times in between reports, rather than slowing your program
down with generating graphs all the time. This means that if your profile
block is running rapidly and repeatedly, a new report will be produced
every ``report_interval`` seconds.
Pending reports will be generated at interpreter shutdown.
Note that even if ``report_interval`` is short, reporting will not
interfere with the profiling results themselves, as a lock is acquired
that will prevent profiled code from running at the same time as the
report generation code. So the overhead produced by report generation does
not affect the results of profiling - this overhead will only affect
portions of your code that are not being profiled.
The lock is shared between instances, and so you can freely instantiate
many :class:`BProfile` instances to profile different parts of your code.
Instances with the same ``output_path`` will share an underlying cProfile
profiler, and so their reports will be combined. Profile objects are
thread safe, so a single instance can be shared as well anywhere in your
program.
.. warning::
Since only one profiler can be running at a time, two profiled pieces
of code in different threads waiting on each other in any way will
deadlock.
"""
_class_lock = threading.Lock()
_report_required = threading.Event()
_report_thread = None
_reporting_lock = threading.RLock()
_instances_requiring_reports = set()
_profilers = weakref.WeakValueDictionary()
_threadlocal = threading.local()
def __init__(self, output_path, threshold_percent=2.5, report_interval=5, enabled=True):
if not output_path.lower().endswith('.png'):
output_path += '.png'
output_path = os.path.abspath(os.path.realpath(output_path))
with self._class_lock:
self.output_path = output_path
self.threshold_percent = threshold_percent
self.report_interval = report_interval
self.time_of_last_report = time.time() - report_interval
self.enabled = enabled
self.running = False
self._instance_lock = threading.Lock()
# Only one profiler per output file:
try:
self.profiler = self._profilers[self.output_path]
except KeyError:
self.profiler = cProfile.Profile()
self._profilers[self.output_path] = self.profiler
# only one reporting thread to be shared between instances:
if self._report_thread is None:
report_thread = threading.Thread(target=self._report_loop, name='bprofile.BProfile._report_loop')
report_thread.daemon = True
report_thread.start()
self.__class__._report_thread = report_thread
def __call__(self, function):
"""Returns a wrapped version of ``function`` with profiling.
Intended for use as a decorator."""
@functools.wraps(function)
def function_with_profiling(*args, **kwargs):
with self:
return function(*args, **kwargs)
return function_with_profiling
def __enter__(self):
self.start()
def __exit__(self, exc_type, exc_value, exc_traceback):
self.stop()
def set_enabled(self, enabled):
"""Set whether profiling is enabled.
If ``enabled`` is True, all methods work as normal. Otherwise
:func:`~bprofile.BProfile.start`, :func:`~bprofile.BProfile.stop`, and
:func:`~bprofile.BProfile.do_report` become dummy methods that do
nothing. This is useful for having a global variable to turn
profiling on or off, based on whether one is debugging or not, or
to enable or disable profiling of different parts of code selectively.
If profiling is running when this method is called to disable it, the
profiling will be stopped."""
with self._instance_lock:
self.enabled = bool(enabled)
if not enabled and self.running:
self.profiler.disable()
self._class_lock.release()
def start(self):
"""Begin profiling."""
with self._instance_lock:
if not self.enabled:
return
if getattr(self._threadlocal, 'is_profiling', False):
message = ('Profiling is already running in this thread. ' +
'Only one profiler can be running at a time, ' +
'and since we are in the same thread we cannot simply ' +
'wait until it finishes, as that would deadlock. ' +
'I thought you would prefer an error message to a deadlock.')
raise RuntimeError(message)
self._class_lock.acquire()
self._threadlocal.is_profiling = True
self.profiler.enable()
def stop(self):
"""Stop profiling.
Stop profiling and output a profiling report, if at least
``report_interval`` has elapsed since the last report. Otherwise
output the report after a delay.
Does not preclude starting profiling again at a later time. Results
are cumulative."""
with self._instance_lock:
if not self.enabled:
return
try:
self.profiler.disable()
self._instances_requiring_reports.add(self)
self._report_required.set()
finally:
self._class_lock.release()
self._threadlocal.is_profiling = False
def do_report(self):
"""Collect statistics and output a .png file of the profiling report.
Notes
-----
This occurs automatically at a rate of ``report_interval``, but one
can call this method to report results sooner. The report will include
results from all :class:`BProfile` instances that have the same
``output_path`` and no more automatic reports (if further profiling is
done) will be produced until after the minimum ``report_interval`` of
those instances.
This method can be called at any time and is threadsafe. It is not
advisable to call it during profiling however as this will incur
overhead that will affect the profiling results. Only automatic
reports are guaranteed to be generated when no profiling is taking
place."""
if not self.enabled:
return
# Randomly named tempfiles, we don't use NamedTemporaryFile as we
# don't want to create the files - just pass the names as command line
# arguments to other programs:
tempfile_prefix = os.path.join(tempfile.gettempdir(), str(uuid.uuid4()))
pstats_file = tempfile_prefix + '.pstats'
dot_file = tempfile_prefix + '.dot'
with self._reporting_lock:
pstats.Stats(self.profiler).dump_stats(pstats_file)
# All instances with this output file that have a pending report:
instances = set(o for o in self._instances_requiring_reports.copy() if o.output_path == self.output_path)
instances.add(self)
threshold_percent = str(min(o.threshold_percent for o in instances))
try:
subprocess.check_call([sys.executable, gprof2dot, '-n', threshold_percent, '-f', 'pstats',
'-o', dot_file, pstats_file], startupinfo=startupinfo)
subprocess.check_call([DOT_PATH, '-o', self.output_path, '-Tpng', dot_file], startupinfo=startupinfo)
os.unlink(dot_file)
os.unlink(pstats_file)
except subprocess.CalledProcessError:
sys.stderr.write('gprof2dot or dot returned nonzero exit code\n')
for instance in instances:
instance.time_of_last_report = time.time()
try:
self._instances_requiring_reports.remove(instance)
except KeyError:
# Another thread already removed it:
pass
@classmethod
def _atexit(cls):
# Finish pending reports:
with cls._reporting_lock:
while True:
try:
instance = cls._instances_requiring_reports.pop()
except KeyError:
break
else:
instance.do_report()
@classmethod
def _report_loop(cls):
atexit.register(cls._atexit)
timeout = None
while True:
cls._report_required.wait(timeout)
with cls._class_lock:
cls._report_required.clear()
if not cls._instances_requiring_reports:
timeout = None
continue
with cls._reporting_lock:
for instance in cls._instances_requiring_reports.copy():
if instance not in cls._instances_requiring_reports:
# Instance has already had a report run on it,
# because it shares a profiler with another
# instance we just reported on. So it has been
# removed from the set. Do not run an extra report
# on it.
continue
else:
next_report_time = instance.time_of_last_report + instance.report_interval
time_until_report = next_report_time - time.time()
if time_until_report < 0:
instance.do_report()
elif timeout is None:
timeout = time_until_report
else:
timeout = min(timeout, time_until_report)
if __name__ == '__main__':
# Test:
profiler = BProfile('test.png')
@profiler
def decorator_test():
with profiler:
time.sleep(10)
# decorator_test() # this should raise an exception saying it would deadlock
def foo():
time.sleep(0.05)
def bar():
time.sleep(0.1)
start_time = time.time()
for i in range(100):
print(i)
with profiler:
time.sleep(0.1)
# profiler.do_report()
foo()
bar()
print(time.time() - start_time)
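# A short, uncalled sketch of the "global flag" pattern described in the BProfile
# docstring: decorators stay in place and profiling is switched on or off in one spot
# via the ``enabled`` argument and set_enabled(). The output filename is illustrative.
def _example_global_flag(profiling_enabled=False):
    flag_profiler = BProfile('flagged.png', enabled=profiling_enabled)
    @flag_profiler
    def workload():
        time.sleep(0.01)
    workload()                         # no profiling overhead while disabled
    flag_profiler.set_enabled(True)    # flip the flag at runtime
    workload()                         # this call is profiled and reported to flagged.png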
|
__init__.py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Implements context management so that nested/scoped contexts and threaded
contexts work properly and as expected.
"""
import collections
import logging
import string
import threading
from ..timeout import Timeout
class _defaultdict(dict):
"""
Dictionary which loads missing keys from another dictionary.
This is neccesary because the ``default_factory`` method of
:class:`collections.defaultdict` does not provide the key.
Examples:
>>> a = {'foo': 'bar'}
>>> b = pwnlib.context._defaultdict(a)
>>> b['foo']
'bar'
>>> 'foo' in b
False
>>> b['foo'] = 'baz'
>>> b['foo']
'baz'
>>> del b['foo']
>>> b['foo']
'bar'
>>> a = {'foo': 'bar'}
>>> b = pwnlib.context._defaultdict(a)
>>> b['baz'] #doctest: +ELLIPSIS
Traceback (most recent call last):
...
KeyError: 'baz'
"""
def __init__(self, default=None):
super(_defaultdict, self).__init__()
if default is None:
default = {}
self.default = default
def __missing__(self, key):
return self.default[key]
class _DictStack(object):
"""
Manages a dictionary-like object, permitting saving and restoring from
a stack of states via :func:`push` and :func:`pop`.
The underlying object used as ``default`` must implement ``copy``, ``clear``,
and ``update``.
Examples:
>>> t = pwnlib.context._DictStack(default={})
>>> t['key'] = 'value'
>>> t
{'key': 'value'}
>>> t.push()
>>> t
{'key': 'value'}
>>> t['key'] = 'value2'
>>> t
{'key': 'value2'}
>>> t.pop()
>>> t
{'key': 'value'}
"""
def __init__(self, default):
self._current = _defaultdict(default)
self.__stack = []
def push(self):
self.__stack.append(self._current.copy())
def pop(self):
self._current.clear()
self._current.update(self.__stack.pop())
def copy(self):
return self._current.copy()
# Pass-through container emulation routines
def __len__(self): return self._current.__len__()
def __delitem__(self, k): return self._current.__delitem__(k)
def __getitem__(self, k): return self._current.__getitem__(k)
def __setitem__(self, k, v): return self._current.__setitem__(k, v)
def __contains__(self, k): return self._current.__contains__(k)
def __iter__(self): return self._current.__iter__()
def __repr__(self): return self._current.__repr__()
def __eq__(self, other): return self._current.__eq__(other)
# Required for keyword expansion operator ** to work
def keys(self): return self._current.keys()
def values(self): return self._current.values()
def items(self): return self._current.items()
class _Tls_DictStack(threading.local, _DictStack):
"""
Per-thread implementation of :class:`_DictStack`.
Examples:
>>> t = pwnlib.context._Tls_DictStack({})
>>> t['key'] = 'value'
>>> print t
{'key': 'value'}
>>> def p(): print t
>>> thread = threading.Thread(target=p)
>>> _ = (thread.start(), thread.join())
{}
"""
pass
def _validator(validator):
"""
Validator that is tightly coupled to the implementation
of the classes here.
This expects that the object has a ._tls property which
is of type _DictStack.
"""
name = validator.__name__
doc = validator.__doc__
def fget(self):
return self._tls[name]
def fset(self, val):
self._tls[name] = validator(self, val)
def fdel(self):
self._tls._current.pop(name,None)
return property(fget, fset, fdel, doc)
class Thread(threading.Thread):
"""
Instantiates a context-aware thread, which inherits its context when it is
instantiated. The class can be accessed both on the context module as
`pwnlib.context.Thread` and on the context singleton object inside the
context module as `pwnlib.context.context.Thread`.
Threads created by using the native :class:`threading.Thread` will have a
clean (default) context.
Regardless of the mechanism used to create any thread, the context
is de-coupled from the parent thread, so changes do not cascade
to child or parent.
Saves a copy of the context when instantiated (at ``__init__``)
and updates the new thread's context before passing control
to the user code via ``run`` or ``target=``.
Examples:
>>> context.clear()
>>> context.update(arch='arm')
>>> def p():
... print context.arch
... context.arch = 'mips'
... print context.arch
>>> # Note that a normal Thread starts with a clean context
>>> # (i386 is the default architecture)
>>> t = threading.Thread(target=p)
>>> _=(t.start(), t.join())
i386
mips
>>> # Note that the main Thread's context is unchanged
>>> print context.arch
arm
>>> # Note that a context-aware Thread receives a copy of the context
>>> t = pwnlib.context.Thread(target=p)
>>> _=(t.start(), t.join())
arm
mips
>>> # Again, the main thread is unchanged
>>> print context.arch
arm
Implementation Details:
This class is implemented by hooking the private function
:func:`threading.Thread._Thread__bootstrap`, which is called before
passing control to :func:`threading.Thread.run`.
This could be done by overriding ``run`` itself, but we would have to
ensure that all uses of the class would only ever use the keyword
``target=`` for ``__init__``, or that all subclasses invoke
``super(Subclass, self).set_up_context()`` or similar.
"""
def __init__(self, *args, **kwargs):
super(Thread, self).__init__(*args, **kwargs)
self.old = context.copy()
def __bootstrap(self):
"""
Implementation Details:
This only works because the class is named ``Thread``.
If its name is changed, we have to implement this hook
differently.
"""
context.update(**self.old)
super(Thread, self).__bootstrap()
def _longest(d):
"""
Returns an OrderedDict with the contents of the input dictionary ``d``
sorted by the length of the keys, in descending order.
This is useful for performing substring matching via ``str.startswith``,
as it ensures the most complete match will be found.
>>> data = {'a': 1, 'bb': 2, 'ccc': 3}
>>> _longest(data) == data
True
>>> for i in _longest(data): print i
ccc
bb
a
"""
return collections.OrderedDict((k,d[k]) for k in sorted(d, key=len, reverse=True))
class TlsProperty(object):
def __get__(self, obj, objtype=None):
return obj._tls
class ContextType(object):
r"""
Class for specifying information about the target machine.
Intended for use as a pseudo-singleton through the global
variable ``pwnlib.context.context``, available via
``from pwn import *`` as ``context``.
The context is usually specified at the top of the Python file for clarity. ::
#!/usr/bin/env python
context.update(arch='i386', os='linux')
Currently supported properties and their defaults are listed below.
The defaults are inherited from :data:`pwnlib.context.ContextType.defaults`.
Additionally, the context is thread-aware when using
:class:`pwnlib.context.Thread` instead of :class:`threading.Thread`
(all internal ``pwntools`` threads use the former).
The context is also scope-aware by using the ``with`` keyword.
Examples:
>>> context.clear()
>>> context.update(os='linux') # doctest: +ELLIPSIS
>>> context.os == 'linux'
True
>>> context.arch = 'arm'
>>> vars(context) == {'arch': 'arm', 'bits': 32, 'endian': 'little', 'os': 'linux'}
True
>>> context.endian
'little'
>>> context.bits
32
>>> def nop():
... print pwnlib.asm.asm('nop').encode('hex')
>>> nop()
00f020e3
>>> with context.local(arch = 'i386'):
... nop()
90
>>> from pwnlib.context import Thread as PwnThread
>>> from threading import Thread as NormalThread
>>> with context.local(arch = 'mips'):
... pwnthread = PwnThread(target=nop)
... thread = NormalThread(target=nop)
>>> # Normal thread uses the default value for arch, 'i386'
>>> _=(thread.start(), thread.join())
90
>>> # Pwnthread uses the correct context from creation-time
>>> _=(pwnthread.start(), pwnthread.join())
00000000
>>> nop()
00f020e3
"""
#
# Use of 'slots' is a heavy-handed way to prevent accidents
# like 'context.architecture=' instead of 'context.arch='.
#
# Setting any properties on a ContextType object will throw an
# exception.
#
__slots__ = '_tls',
#: Default values for :class:`pwnlib.context.ContextType`
defaults = {
'arch': 'i386',
'binary': None,
'bits': 32,
'endian': 'little',
'log_level': logging.INFO,
'newline': '\n',
'os': 'linux',
'signed': False,
'timeout': Timeout.maximum,
'terminal': None,
}
#: Valid values for :meth:`pwnlib.context.ContextType.os`
oses = sorted(('linux','freebsd','windows'))
big_32 = {'endian': 'big', 'bits': 32}
big_64 = {'endian': 'big', 'bits': 64}
little_8 = {'endian': 'little', 'bits': 8}
little_16 = {'endian': 'little', 'bits': 16}
little_32 = {'endian': 'little', 'bits': 32}
little_64 = {'endian': 'little', 'bits': 64}
#: Keys are valid values for :meth:`pwnlib.context.ContextType.arch`.
#
#: Values are defaults which are set when
#: :attr:`pwnlib.context.ContextType.arch` is set
architectures = _longest({
'aarch64': little_64,
'alpha': little_64,
'avr': little_8,
'amd64': little_64,
'arm': little_32,
'cris': little_32,
'i386': little_32,
'ia64': big_64,
'm68k': big_32,
'mips': little_32,
'mips64': little_64,
'msp430': little_16,
'powerpc': big_32,
'powerpc64': big_64,
's390': big_32,
'sparc': big_32,
'sparc64': big_64,
'thumb': little_32,
'vax': little_32,
})
#: Valid values for :attr:`endian`
endiannesses = _longest({
'be': 'big',
'eb': 'big',
'big': 'big',
'le': 'little',
'el': 'little',
'little': 'little'
})
#: Valid string values for :attr:`signed`
signednesses = {
'unsigned': False,
'no': False,
'yes': True,
'signed': True
}
valid_signed = sorted(signednesses)
def __init__(self, **kwargs):
"""
Initialize the ContextType structure.
All keyword arguments are passed to :func:`update`.
"""
self._tls = _Tls_DictStack(_defaultdict(ContextType.defaults))
self.update(**kwargs)
def copy(self):
"""copy() -> dict
Returns a copy of the current context as a dictionary.
Examples:
>>> context.clear()
>>> context.os = 'linux'
>>> vars(context) == {'os': 'linux'}
True
"""
return self._tls.copy()
@property
def __dict__(self):
return self.copy()
def update(self, *args, **kwargs):
"""
Convenience function, which is shorthand for setting multiple
variables at once.
It is a simple shorthand such that::
context.update(os = 'linux', arch = 'arm', ...)
is equivalent to::
context.os = 'linux'
context.arch = 'arm'
...
The following syntax is also valid::
context.update({'os': 'linux', 'arch': 'arm'})
Arguments:
kwargs: Variables to be assigned in the environment.
Examples:
>>> context.clear()
>>> context.update(arch = 'i386', os = 'linux')
>>> context.arch, context.os
('i386', 'linux')
"""
for arg in args:
self.update(**arg)
for k,v in kwargs.items():
setattr(self,k,v)
def __repr__(self):
v = sorted("%s = %r" % (k,v) for k,v in self._tls._current.items())
return '%s(%s)' % (self.__class__.__name__, ', '.join(v))
def local(self, **kwargs):
"""local(**kwargs) -> context manager
Create a context manager for use with the ``with`` statement.
For more information, see the example below or PEP 343.
Arguments:
kwargs: Variables to be assigned in the new environment.
Returns:
ContextType manager for managing the old and new environment.
Examples:
>>> context.clear()
>>> context.timeout = 1
>>> context.timeout == 1
True
>>> print context.timeout
1.0
>>> with context.local(timeout = 2):
... print context.timeout
... context.timeout = 3
... print context.timeout
2.0
3.0
>>> print context.timeout
1.0
"""
class LocalContext(object):
def __enter__(a):
self._tls.push()
self.update(**{k:v for k,v in kwargs.items() if v is not None})
return self
def __exit__(a, *b, **c):
self._tls.pop()
return LocalContext()
def clear(self):
"""
Clears the contents of the context.
All values are set to their defaults.
Examples:
>>> # Default value
>>> context.arch == 'i386'
True
>>> context.arch = 'arm'
>>> context.arch == 'i386'
False
>>> context.clear()
>>> context.arch == 'i386'
True
"""
self._tls._current.clear()
@_validator
def arch(self, arch):
"""
Target machine architecture.
Allowed values are listed in :attr:`pwnlib.context.ContextType.architectures`.
Side Effects:
If an architecture is specified which also implies additional
attributes (e.g. 'amd64' implies 64-bit words, 'powerpc' implies
big-endian), these attributes will be set on the context if a
user has not already set a value.
The following properties may be modified.
- :attr:`bits`
- :attr:`endian`
Raises:
AttributeError: An invalid architecture was specified
Examples:
>>> context.clear()
>>> context.arch == 'i386' # Default architecture
True
>>> context.arch = 'mips'
>>> context.arch == 'mips'
True
>>> context.arch = 'doge' #doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: arch must be one of ['aarch64', ..., 'thumb']
>>> context.arch = 'ppc'
>>> context.arch == 'powerpc' # Aliased architecture
True
>>> context.clear()
>>> context.bits == 32 # Default value
True
>>> context.arch = 'amd64'
>>> context.bits == 64 # New value
True
Note that expressly setting :attr:`bits` means that we use
that value instead of the default
>>> context.clear()
>>> context.bits = 32
>>> context.arch = 'amd64'
>>> context.bits == 32
True
Setting the architecture can override the defaults for
both :attr:`endian` and :attr:`bits`
>>> context.clear()
>>> context.arch = 'powerpc64'
>>> vars(context) == {'arch': 'powerpc64', 'bits': 64, 'endian': 'big'}
True
"""
# Lowercase, remove everything non-alphanumeric
arch = arch.lower()
arch = arch.replace(string.punctuation, '')
# Attempt to perform convenience and legacy compatibility
# transformations.
transform = {'x86':'i386', 'ppc': 'powerpc', 'x86_64': 'amd64'}
for k, v in transform.items():
if arch.startswith(k):
arch = arch.replace(k,v,1)
try:
defaults = ContextType.architectures[arch]
except KeyError:
raise AttributeError('arch must be one of %r' % sorted(ContextType.architectures))
for k,v in ContextType.architectures[arch].items():
if k not in self._tls:
self._tls[k] = v
return arch
@_validator
def bits(self, bits):
"""
Target machine word size, in bits (i.e. the size of general purpose registers).
The default value is ``32``, but changes according to :attr:`arch`.
Examples:
>>> context.clear()
>>> context.bits == 32
True
>>> context.bits = 64
>>> context.bits == 64
True
>>> context.bits = -1 #doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: bits must be >= 0 (-1)
"""
bits = int(bits)
if bits <= 0:
raise AttributeError("bits must be >= 0 (%r)" % bits)
return bits
@_validator
def binary(self, binary):
"""
Infer target architecture, bit-width, and endianness from a binary file.
Data type is a :class:`pwnlib.elf.ELF` object.
Examples:
>>> context.clear()
>>> context.arch, context.bits
('i386', 32)
>>> context.binary = '/bin/bash'
>>> context.arch, context.bits
('amd64', 64)
>>> context.binary
ELF('/bin/bash')
"""
# Cyclic imports... sorry Idolf.
from ..elf import ELF
e = ELF(binary)
self.arch = e.arch
self.bits = e.bits
self.endian = e.endian
return e
@property
def bytes(self):
"""
Target machine word size, in bytes (i.e. the size of general purpose registers).
This is a convenience wrapper around ``bits / 8``.
Examples:
>>> context.bytes = 1
>>> context.bits == 8
True
>>> context.bytes = 0 #doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: bits must be >= 0 (0)
"""
return self.bits/8
@bytes.setter
def bytes(self, value):
self.bits = value*8
@_validator
def endian(self, endianness):
"""
Endianness of the target machine.
The default value is ``'little'``, but changes according to :attr:`arch`.
Raises:
AttributeError: An invalid endianness was provided
Examples:
>>> context.clear()
>>> context.endian == 'little'
True
>>> context.endian = 'big'
>>> context.endian
'big'
>>> context.endian = 'be'
>>> context.endian == 'big'
True
>>> context.endian = 'foobar' #doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: endian must be one of ['be', 'big', 'eb', 'el', 'le', 'little']
"""
endian = endianness.lower()
if endian not in ContextType.endiannesses:
raise AttributeError("endian must be one of %r" % sorted(ContextType.endiannesses))
return ContextType.endiannesses[endian]
@_validator
def log_level(self, value):
"""
Sets the verbosity of ``pwntools`` logging mechanism.
More specifically it controls the filtering of messages that happens
inside the handler for logging to the screen. So if you want e.g. log
all messages to a file, then this attribute makes no difference to you.
Valid values are specified by the standard Python ``logging`` module.
Default value is set to ``INFO``.
Examples:
>>> context.log_level = 'error'
>>> context.log_level == logging.ERROR
True
>>> context.log_level = 10
>>> context.log_level = 'foobar' #doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: log_level must be an integer or one of ['CRITICAL', 'DEBUG', 'ERROR', 'INFO', 'NOTSET', 'WARN', 'WARNING']
"""
# If it can be converted into an int, success
try: return int(value)
except ValueError: pass
# If it is defined in the logging module, success
try: return getattr(logging, value.upper())
except AttributeError: pass
# Otherwise, fail
level_names = filter(lambda x: isinstance(x,str), logging._levelNames)
permitted = sorted(level_names)
raise AttributeError('log_level must be an integer or one of %r' % permitted)
@_validator
def os(self, os):
"""
Operating system of the target machine.
The default value is ``linux``.
Allowed values are listed in :attr:`pwnlib.context.ContextType.oses`.
Examples:
>>> context.os = 'linux'
>>> context.os = 'foobar' #doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: os must be one of ['freebsd', 'linux', 'windows']
"""
os = os.lower()
if os not in ContextType.oses:
raise AttributeError("os must be one of %r" % sorted(ContextType.oses))
return os
@_validator
def signed(self, signed):
"""
Signed-ness for packing operation when it's not explicitly set.
Can be set to any non-string truthy value, or the specific string
values ``'signed'`` or ``'unsigned'`` which are converted into
``True`` and ``False`` correspondingly.
Examples:
>>> context.signed
False
>>> context.signed = 1
>>> context.signed
True
>>> context.signed = 'signed'
>>> context.signed
True
>>> context.signed = 'unsigned'
>>> context.signed
False
>>> context.signed = 'foobar' #doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: signed must be one of ['no', 'signed', 'unsigned', 'yes'] or a non-string truthy value
"""
try: signed = ContextType.signednesses[signed]
except KeyError: pass
if isinstance(signed, str):
raise AttributeError('signed must be one of %r or a non-string truthy value' % sorted(ContextType.signednesses))
return bool(signed)
@_validator
def timeout(self, value=Timeout.default):
"""
Default amount of time to wait for a blocking operation before it times out,
specified in seconds.
The default value is to have an infinite timeout.
See :class:`pwnlib.timeout.Timeout` for additional information on
valid values.
"""
return Timeout(value).timeout
@_validator
def terminal(self, value):
"""
Default terminal used by :meth:`pwnlib.util.misc.run_in_new_terminal`.
Can be a string or an iterable of strings. In the latter case the first
entry is the terminal and the rest are default arguments.
"""
if isinstance(value, (str, unicode)):
return [value]
return value
#*************************************************************************
# ALIASES
#*************************************************************************
#
# These fields are aliases for fields defined above, either for
# convenience or compatibility.
#
#*************************************************************************
def __call__(self, **kwargs):
"""
Alias for :meth:`pwnlib.context.ContextType.update`
"""
return self.update(**kwargs)
def reset_local(self):
"""
Deprecated. Use :meth:`clear`.
"""
self.clear()
@property
def endianness(self):
"""
Legacy alias for :attr:`endian`.
Examples:
>>> context.endian == context.endianness
True
"""
return self.endian
@endianness.setter
def endianness(self, value):
self.endian = value
@property
def sign(self):
"""
Alias for :attr:`signed`
"""
return self.signed
@sign.setter
def sign(self, value):
self.signed = value
@property
def signedness(self):
"""
Alias for :attr:`signed`
"""
return self.signed
@signedness.setter
def signedness(self, value):
self.signed = value
@property
def word_size(self):
"""
Alias for :attr:`bits`
"""
return self.bits
@word_size.setter
def word_size(self, value):
self.bits = value
Thread = Thread
#: Global ``context`` object, used to store commonly-used pwntools settings.
#: In most cases, the context is used to infer default variables values.
#: For example, :meth:`pwnlib.asm.asm` can take an ``os`` parameter as a
#: keyword argument. If it is not supplied, the ``os`` specified by
#: ``context`` is used instead.
#: Consider it a shorthand to passing ``os=`` and ``arch=`` to every single
#: function call.
context = ContextType()
|
mp.py
|
import logging
import multiprocessing
import select
import unittest
try:
from collections.abc import Sequence
except ImportError:
from collections import Sequence
import os
import sys
import six
import multiprocessing.connection as connection
from nose2 import events, loader, result, runner, session, util
log = logging.getLogger(__name__)
class MultiProcess(events.Plugin):
configSection = 'multiprocess'
def __init__(self):
self.addArgument(self.setProcs, 'N', 'processes', '# of procs')
self.testRunTimeout = self.config.as_float('test-run-timeout', 60.0)
self._procs = self.config.as_int(
'processes', 0)
self.setAddress(self.config.as_str('bind_address', None))
self.cases = {}
@property
def procs(self):
"""Get the appropriate number of procs for self.procs if self._procs is
0."""
if self._procs == 0:
try:
self._procs = multiprocessing.cpu_count()
except NotImplementedError as e:
self._procs = 1
return self._procs
@procs.setter
def procs(self, value):
"""Setter for procs property"""
if value < 0:
raise AttributeError("Can't set the procs number to less than 0, (0 = Auto)")
self._procs = value
def setProcs(self, num):
self.procs = int(num[0]) # FIXME merge n fix
self.register()
def setAddress(self, address):
if address is None or address.strip() == '':
address = []
else:
address = [x.strip() for x in address.split(':')[:2]]
#Background: On Windows, select.select only works on sockets. So the
#ability to select a bindable address and optionally port for the mp
#plugin was added. Pipes should support a form of select, but this
#would require using pywin32. There are alternatives but all have
#some kind of downside. An alternative might be creating a connection
#like object using a shared queue for incomings events.
self.bind_host = None
self.bind_port = 0
if sys.platform == "win32" or address:
self.bind_host = '127.116.157.163'
if address and address[0]:
self.bind_host = address[0]
self.bind_port = 0
if len(address) >= 2:
self.bind_port = int(address[1])
def pluginsLoaded(self, event):
self.addMethods('registerInSubprocess', 'startSubprocess',
'stopSubprocess')
def startTestRun(self, event):
event.executeTests = self._runmp
def beforeInteraction(self, event):
# prevent interactive plugins from running
event.handled = True
return False
def _runmp(self, test, result):
# flatten technically modifies a hash of test cases, let's
# only run it once per run.
flat = list(self._flatten(test))
# do not send import failures to the subprocesses, which will mangle them
# but 'run' them in the main process.
failed_import_id = 'nose2.loader.LoadTestsFailure'
result_ = self.session.testResult
for testid in flat:
if testid.startswith(failed_import_id):
self.cases[testid].run(result_)
# XXX Process-Handling: The length of the filtered list needs to be
# known for _startProcs, until this can be cleaned up. This
# wasn't the best way to deal with too few tests
flat = [x for x in flat if not x.startswith(failed_import_id)]
procs = self._startProcs(len(flat))
# send one initial task to each process
for proc, conn in procs:
if not flat:
break
caseid = flat.pop(0)
# NOTE: it throws errors on broken pipes and bad serialization
conn.send(caseid)
rdrs = [conn for proc, conn in procs if proc.is_alive()]
while flat or rdrs:
ready, _, _ = select.select(rdrs, [], [], self.testRunTimeout)
for conn in ready:
# XXX Process-Handling: If we get an EOFError on receive the
# process finished, or we lost the process and the test it was
# working on. Also do we rebuild the process?
try:
remote_events = conn.recv()
except EOFError:
# probably dead
log.warning("Subprocess connection closed unexpectedly")
continue
# If remote_events is None, the process exited normally,
# which should mean that we didn't send any more tests to it.
if remote_events is None:
log.debug("Conn closed %s", conn)
rdrs.remove(conn)
continue
# replay events
testid, events = remote_events
log.debug("Received results for %s", testid)
for (hook, event) in events:
log.debug("Received %s(%s)", hook, event)
self._localize(event)
getattr(self.session.hooks, hook)(event)
# Send the next test_id
# NOTE: send throws errors on broken pipes and bad serialization
if not flat:
# If there are no more, send None - it's the 'done' flag
conn.send(None)
continue
caseid = flat.pop(0)
conn.send(caseid)
for _, conn in procs:
conn.close()
# ensure we wait until all processes are done before
# exiting, to allow plugins running there to finalize
for proc, _ in procs:
proc.join()
def _prepConns(self):
"""
If the ``bind_host`` is not ``None``, return:
(multiprocessing.connection.Listener, (address, port, authkey))
else:
(parent_connection, child_connection)
For the former case, ``accept`` must be called on the listener in order
to get a ``Connection`` object for the socket.
"""
if self.bind_host is not None:
#prevent "accidental" wire crossing
authkey = os.urandom(20)
address = (self.bind_host, self.bind_port)
listener = connection.Listener(address, authkey=authkey)
return (listener, listener.address + (authkey,))
else:
return multiprocessing.Pipe()
def _acceptConns(self, parent_conn):
"""
When the listener is a :class:`connection.Listener` instance: accept the next
incoming connection. However, a timeout mechanism is needed. Since
this functionality was added to support mp over inet sockets, this method
assumes a socket-based listener, and will access the private _socket
member to get a low-level socket to do a select on. (A standalone sketch of
this handshake appears after gentests() below.)
"""
if isinstance(parent_conn, connection.Listener):
#ick private interface
rdrs = [parent_conn._listener._socket]
readable, _, _ = select.select(rdrs, [], [],
self.testRunTimeout)
if readable:
return parent_conn.accept()
else:
raise RuntimeError('MP: Socket Connection Failed')
else:
return parent_conn
def _startProcs(self, test_count):
# Create session export
session_export = self._exportSession()
procs = []
count = min(test_count, self.procs)
log.debug("Creating %i worker processes", count)
for i in range(0, count):
parent_conn, child_conn = self._prepConns()
proc = multiprocessing.Process(
target=procserver, args=(session_export, child_conn))
proc.daemon = True
proc.start()
parent_conn = self._acceptConns(parent_conn)
procs.append((proc, parent_conn))
return procs
def _flatten(self, suite):
"""
Flatten test-suite into list of IDs, AND record all test case
into self.cases
CAVEAT: Due to a current limitation of the MP plugin, examine the suite's
tests to find out whether they have class or module fixtures and
group them by test class or module name.
This is to aid in their dispatch.
"""
log.debug("Flattening test into list of IDs")
mods = {}
classes = {}
stack = [suite]
while stack:
suite = stack.pop()
for test in suite:
if isinstance(test, unittest.TestSuite):
stack.append(test)
else:
testid = util.test_name(test)
self.cases[testid] = test
if util.has_module_fixtures(test):
mods.setdefault(test.__class__.__module__, []).append(
testid)
elif util.has_class_fixtures(test):
if test.__class__.__name__ == "_MethodTestCase":
# wrapped by MethodTestCase in testclasses.py
test = test.obj
if hasattr(test, '_testMethodName') and test._testMethodName:
# test a single method under the test class
yield "%s.%s.%s" % (
test.__class__.__module__,
test.__class__.__name__,
test._testMethodName,
)
else:
classes.setdefault(
"%s.%s" % (test.__class__.__module__,
test.__class__.__name__),
[]).append(testid)
else:
yield testid
for cls in sorted(classes.keys()):
yield cls
for mod in sorted(mods.keys()):
yield mod
def _localize(self, event):
# XXX set loader, case, result etc to local ones, if present in event
# (event case will be just the id)
# (traceback in exc_info if any won't be real!)
if hasattr(event, 'result'):
event.result = self.session.testResult
if hasattr(event, 'loader'):
event.loader = self.session.testLoader
if hasattr(event, 'runner'):
event.runner = self.session.testRunner
if hasattr(event, 'test') and isinstance(event.test, six.string_types):
# remote event.case is the test id
try:
event.test = self.cases[event.test]
except KeyError:
event.test = self.session.testLoader.failedLoadTests(
'test_not_found',
RuntimeError("Unable to locate test case for %s in "
"main process" % event.test))._tests[0]
def _exportSession(self):
"""
        Generate the session information passed to the worker process.
        CAVEAT: The entire contents *MUST* be pickleable
        and safe to use in the subprocess.
        This probably includes:
        * No argparse namespaces/named-tuples
        * No plugin instances
        * No hooks
:return:
"""
export = {'config': self.session.config,
'verbosity': self.session.verbosity,
'startDir': self.session.startDir,
'topLevelDir': self.session.topLevelDir,
'logLevel': self.session.logLevel,
'pluginClasses': []}
event = RegisterInSubprocessEvent()
# fire registerInSubprocess on plugins -- add those plugin classes
# CAVEAT: classes must be pickleable!
self.session.hooks.registerInSubprocess(event)
export['pluginClasses'].extend(event.pluginClasses)
return export
def procserver(session_export, conn):
# init logging system
rlog = multiprocessing.log_to_stderr()
rlog.setLevel(session_export['logLevel'])
# make a real session from the "session" we got
ssn = import_session(rlog, session_export)
if isinstance(conn, Sequence):
conn = connection.Client(conn[:2], authkey=conn[2])
event = SubprocessEvent(ssn.testLoader,
ssn.testResult,
ssn.testRunner,
ssn.plugins,
conn)
res = ssn.hooks.startSubprocess(event)
if event.handled and not res:
conn.send(None)
conn.close()
ssn.hooks.stopSubprocess(event)
return
# receive and run tests
executor = event.executeTests
for testid in gentests(conn):
if testid is None:
break
# XXX to handle weird cases like layers, need to
# deal with the case that testid is something other
# than a simple string.
test = event.loader.loadTestsFromName(testid)
        # XXX Is there a need to protect the loop? try/except?
rlog.debug("Execute test %s (%s)", testid, test)
executor(test, event.result)
events = [e for e in ssn.hooks.flush()]
try:
conn.send((testid, events))
rlog.debug("Log for %s returned", testid)
except:
rlog.exception("Fail sending event %s: %s" % (testid, events))
# Send empty event list to unblock the conn.recv on main process.
conn.send((testid, []))
conn.send(None)
conn.close()
ssn.hooks.stopSubprocess(event)
def import_session(rlog, session_export):
ssn = session.Session()
ssn.config = session_export['config']
ssn.hooks = RecordingPluginInterface()
ssn.verbosity = session_export['verbosity']
ssn.startDir = session_export['startDir']
ssn.topLevelDir = session_export['topLevelDir']
ssn.prepareSysPath()
loader_ = loader.PluggableTestLoader(ssn)
ssn.testLoader = loader_
result_ = result.PluggableTestResult(ssn)
ssn.testResult = result_
runner_ = runner.PluggableTestRunner(ssn) # needed??
ssn.testRunner = runner_
# load and register plugins, forcing multiprocess to the end
ssn.plugins = [
plugin(session=ssn) for plugin in session_export['pluginClasses']
if plugin is not MultiProcess
]
rlog.debug("Plugins loaded: %s", ssn.plugins)
for plugin in ssn.plugins:
plugin.register()
rlog.debug("Registered %s in subprocess", plugin)
# instantiating the plugin will register it.
ssn.plugins.append(MultiProcess(session=ssn))
rlog.debug("Registered %s in subprocess", MultiProcess)
ssn.plugins[-1].pluginsLoaded(events.PluginsLoadedEvent(ssn.plugins))
return ssn
# test generator
def gentests(conn):
while True:
try:
testid = conn.recv()
if testid is None:
return
yield testid
except EOFError:
return
# custom event classes
class SubprocessEvent(events.Event):
"""Event fired at start and end of subprocess execution.
.. attribute :: loader
Test loader instance
.. attribute :: result
Test result
.. attribute :: plugins
List of plugins loaded in the subprocess.
.. attribute :: connection
The :class:`multiprocessing.Connection` instance that the
subprocess uses for communication with the main process.
.. attribute :: executeTests
Callable that will be used to execute tests. Plugins may set
this attribute to wrap or otherwise change test execution. The
callable must match the signature::
def execute(suite, result):
...
"""
def __init__(self, loader, result, runner, plugins, connection, **metadata):
self.loader = loader
self.result = result
self.runner = runner
self.plugins = plugins
self.connection = connection
self.executeTests = lambda test, result: test(result)
super(SubprocessEvent, self).__init__(**metadata)
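# Illustrative sketch (not part of this module): a plugin could wrap test
# execution in the worker by replacing ``executeTests`` from its
# ``startSubprocess`` hook. Assuming ``events``, ``log`` and ``time`` are
# available, something like:
#
#     class TimingPlugin(events.Plugin):          # hypothetical plugin
#         def registerInSubprocess(self, event):
#             event.pluginClasses.append(self.__class__)
#         def startSubprocess(self, event):
#             inner = event.executeTests
#             def timed(test, result):
#                 start = time.time()
#                 inner(test, result)
#                 log.debug("%s took %.3fs", test, time.time() - start)
#             event.executeTests = timed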
class RegisterInSubprocessEvent(events.Event):
"""Event fired to notify plugins that multiprocess testing will occur
.. attribute :: pluginClasses
Add a plugin class to this list to cause the plugin to be
instantiated in each test-running subprocess. The most common
thing to do, for plugins that need to run in subprocesses, is::
def registerInSubprocess(self, event):
event.pluginClasses.append(self.__class__)
"""
def __init__(self, **metadata):
self.pluginClasses = []
super(RegisterInSubprocessEvent, self).__init__(**metadata)
# custom hook system that records calls and events
class RecordingHook(events.Hook):
def __init__(self, method, interface):
super(RecordingHook, self).__init__(method)
self.interface = interface
def __call__(self, event):
res = super(RecordingHook, self).__call__(event)
self.interface.log(self.method, event)
return res
class RecordingPluginInterface(events.PluginInterface):
hookClass = RecordingHook
noLogMethods = set(
['getTestCaseNames', 'startSubprocess', 'stopSubprocess',
'registerInSubprocess', 'moduleLoadedSuite'])
def __init__(self):
super(RecordingPluginInterface, self).__init__()
self.events = []
def log(self, method, event):
self.events.append((method, event))
def flush(self):
events = self.events[:]
self.events = []
return events
def register(self, method, plugin):
"""Register a plugin for a method.
:param method: A method name
:param plugin: A plugin instance
"""
self._hookForMethod(method).append(plugin)
def __getattr__(self, attr):
if attr.startswith('__'):
raise AttributeError('No %s in %s' % (attr, self))
return self._hookForMethod(attr)
def _hookForMethod(self, method):
# return recording hook for most hooks, normal hook for those
# (like test loading and subprocess events) that we don't want
# to send back to the main process.
try:
return self.hooks[method]
except KeyError:
if method in self.noLogMethods or method.startswith('loadTest'):
hook = events.Hook(method)
else:
hook = self.hookClass(method, self)
self.hooks[method] = hook
return hook
|
fantome_opera_serveur.py
|
from random import shuffle,randrange
from time import sleep
from threading import Thread
import dummy0, dummy1
latence = 0.01
permanents, deux, avant, apres = {'rose'}, {'rouge','gris','bleu'}, {'violet','marron'}, {'noir','blanc'}
couleurs = avant | permanents | apres | deux
passages = [{1,4},{0,2},{1,3},{2,7},{0,5,8},{4,6},{5,7},{3,6,9},{4,9},{7,8}]
pass_ext = [{1,4},{0,2,5,7},{1,3,6},{2,7},{0,5,8,9},{4,6,1,8},{5,7,2,9},{3,6,9,1},{4,9,5},{7,8,4,6}]
def message(texte,jos):
for j in jos:
f = open("./"+str(j.numero)+"/infos.txt","a")
f.write(texte + "\n")
f.close()
def informer(texte):
message(texte,joueurs)
def demander(q,j):
informer("QUESTION : "+q)
f = open("./"+str(j.numero)+"/questions"+".txt","w")
f.write(q)
f.close()
sleep(latence)
f = open("./"+str(j.numero)+"/reponses"+".txt","r")
r = f.read()
f.close()
informer("REPONSE DONNEE : "+r)
return r
class personnage:
def __init__(self,couleur):
self.couleur, self.suspect, self.position, self.pouvoir = couleur, True, 0, True
def __repr__(self):
susp = "-suspect" if self.suspect else "-clean"
return self.couleur + "-" + str(self.position) + susp
class joueur:
def __init__(self,n):
self.numero = n
self.role = "l'inspecteur" if n == 0 else "le fantome"
def jouer(self,party):
informer("****\n Tour de "+self.role)
p = self.selectionner(party.tuiles_actives)
avec = self.activer_pouvoir(p,party,avant|deux)
self.bouger(p,avec,party.bloque)
self.activer_pouvoir(p,party,apres|deux)
def selectionner(self,t):
w = demander("Tuiles disponibles : " + str(t) + " choisir entre 0 et " + str(len(t)-1),self)
i = int(w) if w.isnumeric() and int(w) in range(len(t)) else 0
p = t[i]
informer("REPONSE INTERPRETEE : "+str(p))
informer(self.role + " joue " + p.couleur)
del t[i]
return p
def activer_pouvoir(self,p,party,activables):
if p.pouvoir and p.couleur in activables:
a = demander("Voulez-vous activer le pouvoir (0/1) ?",self) == "1"
informer("REPONSE INTERPRETEE : "+str(a==1))
if a :
informer("Pouvoir de " + p.couleur + " activé")
p.pouvoir = False
if p.couleur == "rouge":
draw = party.cartes[0]
informer(str(draw) + " a été tiré")
if draw == "fantome":
party.start += -1 if self.numero == 0 else 1
elif self.numero == 0:
draw.suspect = False
del party.cartes[0]
if p.couleur == "noir":
for q in party.personnages:
if q.position in {x for x in passages[p.position] if x not in party.bloque or q.position not in party.bloque} :
q.position = p.position
informer("NOUVEAU PLACEMENT : "+str(q))
if p.couleur == "blanc":
for q in party.personnages:
if q.position == p.position and p != q:
dispo = {x for x in passages[p.position] if x not in party.bloque or q.position not in party.bloque}
w = demander(str(q) + ", positions disponibles : " + str(dispo) + ", choisir la valeur",self)
x = int(w) if w.isnumeric() and int(w) in dispo else dispo.pop()
informer("REPONSE INTERPRETEE : "+str(x))
q.position = x
informer("NOUVEAU PLACEMENT : "+str(q))
if p.couleur == "violet":
informer("Rappel des positions :\n" + str(party))
co = demander("Avec quelle couleur échanger (pas violet!) ?",self)
if co not in couleurs:
co = "rose"
informer("REPONSE INTERPRETEE : "+co)
q = [x for x in party.personnages if x.couleur == co][0]
p.position, q.position = q.position, p.position
informer("NOUVEAU PLACEMENT : "+str(p))
informer("NOUVEAU PLACEMENT : "+str(q))
if p.couleur == "marron":
return [q for q in party.personnages if p.position == q.position]
if p.couleur == "gris":
w = demander("Quelle salle obscurcir ? (0-9)",self)
party.shadow = int(w) if w.isnumeric() and int(w) in range(10) else 0
informer("REPONSE INTERPRETEE : "+str(party.shadow))
if p.couleur == "bleu":
w = demander("Quelle salle bloquer ? (0-9)",self)
x = int(w) if w.isnumeric() and int(w) in range(10) else 0
w = demander("Quelle sortie ? Chosir parmi : "+str(passages[x]),self)
y = int(w) if w.isnumeric() and int(w) in passages[x] else passages[x].copy().pop()
informer("REPONSE INTERPRETEE : "+str({x,y}))
party.bloque = {x,y}
return [p]
def bouger(self,p,avec,bloque):
pass_act = pass_ext if p.couleur == 'rose' else passages
if p.couleur != 'violet' or p.pouvoir:
disp = {x for x in pass_act[p.position] if p.position not in bloque or x not in bloque}
w = demander("positions disponibles : " + str(disp) + ", choisir la valeur",self)
x = int(w) if w.isnumeric() and int(w) in disp else disp.pop()
informer("REPONSE INTERPRETEE : "+str(x))
for q in avec:
q.position = x
informer("NOUVEAU PLACEMENT : "+str(q))
class partie:
def __init__(self,joueurs):
for i in [0,1]:
f = open("./" + str(i) + "/infos.txt","w")
f.close()
f = open("./" + str(i) + "/questions.txt","w")
f.close()
f = open("./" + str(i) + "/reponses.txt","w")
f.close()
self.joueurs = joueurs
self.start, self.end, self.num_tour, self.shadow, x = 4, 22, 1, randrange(10), randrange(10)
self.bloque = {x,passages[x].copy().pop()}
self.personnages = {personnage(c) for c in couleurs}
self.tuiles = [p for p in self.personnages]
self.cartes = self.tuiles[:]
self.fantome = self.cartes[randrange(8)]
message("!!! Le fantôme est : "+self.fantome.couleur,[self.joueurs[1]])
self.cartes.remove(self.fantome)
self.cartes += ['fantome']*3
shuffle(self.tuiles)
shuffle(self.cartes)
for i,p in enumerate(self.tuiles):
p.position = i
def actions(self):
joueur_actif = self.num_tour % 2
if joueur_actif == 1:
shuffle(self.tuiles)
self.tuiles_actives = self.tuiles[:4]
else:
self.tuiles_actives = self.tuiles[4:]
for i in [joueur_actif,1-joueur_actif,1-joueur_actif,joueur_actif]:
self.joueurs[i].jouer(self)
def lumiere(self):
partition = [{p for p in self.personnages if p.position == i} for i in range(10)]
if len(partition[self.fantome.position]) == 1 or self.fantome.position == self.shadow:
informer("le fantome frappe")
self.start += 1
for piece,gens in enumerate(partition):
if len(gens) > 1 and piece != self.shadow:
for p in gens:
p.suspect = False
else:
informer("pas de cri")
for piece,gens in enumerate(partition):
if len(gens) == 1 or piece == self.shadow:
for p in gens:
p.suspect = False
self.start += len([p for p in self.personnages if p.suspect])
def tour(self):
informer("**************************\n" + str(self))
self.actions()
self.lumiere()
for p in self.personnages:
p.pouvoir = True
self.num_tour += 1
def lancer(self):
while self.start < self.end and len([p for p in self.personnages if p.suspect]) > 1:
self.tour()
informer("L'enquêteur a trouvé - c'était " + str(self.fantome) if self.start < self.end else "Le fantôme a gagné")
informer("Score final : "+str(self.end-self.start))
def __repr__(self):
return "Tour:" + str(self.num_tour) + ", Score:"+str(self.start)+"/"+str(self.end) + ", Ombre:" + str(self.shadow) + ", Bloque:" + str(self.bloque) +"\n" + " ".join([str(p) for p in self.personnages])
joueurs = [joueur(0),joueur(1)]
Thread(target=dummy0.lancer).start()
Thread(target=dummy1.lancer).start()
partie(joueurs).lancer()
|
u2p2_before_scandevices.py
|
import sys
import time
import spidev
import threading
is_on_raspberry_pi = False
with open('/etc/os-release') as os_version_file:
is_on_raspberry_pi = 'raspbian' in os_version_file.read().lower()
spi = None
if is_on_raspberry_pi:
spi = spidev.SpiDev(0, 0) # rasp
print("I'm on Raspberry Pi!")
else:
spi = spidev.SpiDev(1, 0) # lichee
print("I'm on custom board!")
spi.max_speed_hz = 2000000
fff = open(sys.argv[1], "rb" )
"""
https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/input-event-codes.h#L38
xbox gamepad:
EVENT TYPE: EV_ABS
dpad: ABS_HAT0X ABS_HAT1X
buttons: https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/input-event-codes.h#L385
sticks: ABS_X, ABS_Y, ABS_RX, ABS_RY
"""
EV_KEY = 0x01
EV_REL = 0x02
EV_ABS = 0x03
SPI_MSG_KEYBOARD_EVENT = 1
SPI_MSG_MOUSE_EVENT = 2
SPI_MSG_GAMEPAD_EVENT = 3
KEYBOARD_ID_PLACEHOLDER = 0
keyboard_spi_msg_header = [0xde, 0, SPI_MSG_KEYBOARD_EVENT, KEYBOARD_ID_PLACEHOLDER]
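# Illustrative sketch (assumption, not used below): each 16-byte read from the
# event device is a Linux ``input_event`` struct; after dropping the 8-byte
# timestamp, the remaining 8 bytes could be decoded as
#
#     import struct
#     etype, code, value = struct.unpack('<HHi', bytes(data))
#
# which is consistent with the ``data[0] == EV_KEY`` check in keyboard_worker().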
def keyboard_worker():
print("keyboard_thread started")
while 1:
data = list(fff.read(16)[8:])
if data[0] == EV_KEY:
spi.xfer(keyboard_spi_msg_header + data)
# print(data)
# print('----')
def mouse_worker():
print("mouse_thread started")
while 1:
time.sleep(0.2)
keyboard_thread = threading.Thread(target=keyboard_worker, daemon=True)
keyboard_thread.start()
mouse_thread = threading.Thread(target=mouse_worker, daemon=True)
mouse_thread.start()
while 1:
# print("main loop")
time.sleep(1)
|
MainFrame.py
|
import threading
import tkinter.ttk as ttk
from tkinter.constants import END, N, S, E, W, NORMAL, DISABLED, RIGHT, CENTER, SEL, INSERT, HORIZONTAL
from tkinter import Text
import pyttsx3
from pyttsx3 import engine
import re
class MainFrame(ttk.Frame):
def __init__(self, **kw):
ttk.Frame.__init__(self, **kw)
self.engine = None
self.spoken_text = ''
self.highlight_index1 = None
self.highlight_index2 = None
self.build_frame_content(kw)
def build_frame_content(self, kw):
self.grid_columnconfigure(0, weight=1)
self.grid_columnconfigure(1, weight=0)
self.grid_columnconfigure(2, weight=0)
self.grid_columnconfigure(3, weight=1)
row_index = 0
self.progress = ttk.Progressbar(self, orient=HORIZONTAL, mode="determinate")
self.progress.grid(row=row_index, columnspan=4, sticky=(W, E))
row_index += 1
self.grid_rowconfigure(row_index, weight=1)
self.title = ttk.Label(self, font=("Georgia", "80"), justify=RIGHT, text="Speed Reader", anchor=CENTER)
self.title.grid(row=row_index, column=0, columnspan=4, sticky=(N, W, E), pady=15)
row_index += 1
self.spoken_words = ttk.Label(self, font=("Georgia", "20"), justify=RIGHT, anchor=E)
self.spoken_words.grid(row=row_index, column=0, columnspan=4, sticky=(W, E))
row_index += 1
self.current_word_label = ttk.Label(self, font=("Georgia", "120"), anchor=CENTER)
self.current_word_label.grid(row=row_index, column=0, columnspan=4, sticky=(W, E))
row_index += 1
self.next_words = ttk.Label(self, font=("Georgia", "20"), anchor=W)
self.next_words.grid(row=row_index, column=0, columnspan=4, sticky=(W, E))
row_index += 1
self.speed_label = ttk.Label(self, text="Speed: ")
self.speed_label.grid(row=row_index, column=1, pady=10)
self.speed_entry = ttk.Entry(self)
self.speed_entry.insert(0, "500")
self.speed_entry.grid(row=row_index, column=2, pady=10)
row_index += 1
self.grid_rowconfigure(row_index, weight=1)
self.text_area = Text(self, height=5, width=1, font=("Georgia", "40"))
self.text_area.insert(END, '')
self.text_area.tag_config(TAG_CURRENT_WORD, foreground="red")
self.text_area.grid(row=row_index, column=0, columnspan=4, sticky=(N, S, E, W))
row_index += 1
self.speak_button = ttk.Button(self, text="Speak")
self.speak_button.grid(row=row_index, column=1, pady=10)
self.speak_button['state'] = NORMAL
self.speak_button.bind("<Button-1>", self.speak)
self.stop_button = ttk.Button(self, text="Stop")
self.stop_button.grid(row=row_index, column=2, pady=10)
self.stop_button['state'] = DISABLED
self.stop_button.bind("<Button-1>", self.stop)
self.text_area.bind("<Control-Key-a>", self.select_all_text)
self.text_area.bind("<Control-Key-A>", self.select_all_text)
self.master.bind("<Control-Key-b>", self.paste_and_speak)
self.master.bind("<Control-Key-B>", self.paste_and_speak)
self.master.protocol("WM_DELETE_WINDOW", self.on_closing)
def on_closing(self):
self.stop(None)
self.master.destroy()
self.master.quit()
def paste_and_speak(self, event):
self.stop(event)
self.text_area.delete("1.0", END)
self.text_area.insert(END, self.master.clipboard_get())
self.speak(event)
def select_all_text(self, event):
self.text_area.tag_add(SEL, "1.0", END)
def stop(self, event):
if self.stop_button['state'].__str__() == NORMAL:
self.engine.stop()
self.speak_button['state'] = NORMAL
self.stop_button['state'] = DISABLED
def onStart(self, name):
self.speak_button['state'] = DISABLED
self.stop_button['state'] = NORMAL
print("onStart")
def onStartWord(self, name, location, length):
read_trail = 100
left_index = location - read_trail
if left_index < 0:
left_index = 0
self.spoken_words['text'] = self.spoken_text[left_index:location]
self.current_word_label['text'] = self.spoken_text[location:location + length]
self.next_words['text'] = self.spoken_text[location + length:location + length + read_trail]
if self.highlight_index1 is not None:
self.text_area.tag_remove(TAG_CURRENT_WORD, self.highlight_index1, self.highlight_index2)
self.highlight_index1 = "1.{}".format(location)
self.highlight_index2 = "1.{}".format(location + length)
self.text_area.see(self.highlight_index1)
self.text_area.tag_add(TAG_CURRENT_WORD, self.highlight_index1, self.highlight_index2)
self.progress["maximum"] = self.spoken_text.__len__()
self.progress["value"] = location
def onEnd(self, name, completed):
self.speak_button['state'] = NORMAL
self.stop_button['state'] = DISABLED
self.progress["maximum"] = self.spoken_text.__len__()
self.progress["value"] = self.spoken_text.__len__()
print("onEnd")
def speak(self, event):
if self.speak_button['state'].__str__() == NORMAL:
text = self.text_area.get("1.0", END).replace('\n', ' ')
text = re.sub(r'http\S+', ' [URL] ', text)
self.spoken_text = text
self.text_area.delete("1.0", END)
self.text_area.insert(END, self.spoken_text)
speech_speed = int(self.speed_entry.get())
self.thread = threading.Thread(target=self.speak_on_thread, args=(speech_speed, self.spoken_text))
self.thread.daemon = True
self.thread.start()
def speak_on_thread(self, speech_speed, spoken_text):
if self.engine is None:
self.engine = pyttsx3.init()
self.engine.setProperty('rate', speech_speed)
self.engine.connect('started-utterance', self.onStart)
self.engine.connect('started-word', self.onStartWord)
self.engine.connect('finished-utterance', self.onEnd)
self.engine.say(spoken_text)
self.engine.startLoop()
else:
self.engine.setProperty('rate', speech_speed)
self.engine.say(spoken_text)
TAG_CURRENT_WORD = "current word"
|
api.py
|
# =================================================================
# imports
# =================================================================
import threading
import time
import json
import yaml
import numpy as np
import serial
import math
import sys
from serial.tools import list_ports
from subprocess import Popen, PIPE
from pkg_resources import resource_filename
import os
import copy
import re
# =================================================================
# print
# =================================================================
def _printx(enable = True, *arg):
if enable:
        print(*arg)  # unpack so arguments print space-separated rather than as a tuple
class _port_usb(object):
def __init__(self):
self._port = None
def port_list(self):
#result = json.dumps([str(p[0]) for p in list_ports.comports()])
result = [str(p[0]) for p in list_ports.comports()]
self._log_add(result, "port_list")
return json.dumps(result)
def _port_open(self, port):
if self._port: # port is already open
return True
prt = serial.Serial()
prt.port = port
prt.baudrate = 115200
prt.bytesize = serial.EIGHTBITS # number of bits per bytes
prt.parity = serial.PARITY_NONE # set parity check: no parity
prt.stopbits = serial.STOPBITS_ONE # number of stop bits
prt.timeout = .001 # non-block read
prt.writeTimeout = None # timeout for write
try:
prt.open()
self._port = prt
return True
except Exception as ex:
return False
def _port_read(self):
response_line = self._port.readline()
if response_line:
response = str(response_line,'utf-8')
try:
_result = json.loads(response)
return _result
except Exception as ex:
pass
return False
# =================================================================
# methods for working with angles
# =================================================================
# Return a number wrapped in the range [lo, hi)
def wrap(a, lo, hi):
return np.mod(a - lo, hi - lo) + lo
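# Worked example (for clarity only): values just outside the range fold back in,
# e.g. wrap(190, -180, 180) == -170 and wrap(-190, -180, 180) == 170.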
# =================================================================
# handy methods
# =================================================================
class easy_method(object):
def __init__(self):
super(easy_method, self).__init__()
"""
prm:
dict
json-dict
append
True
False
sync
True
False
"""
def set_io(self, prm, fulfill = True, append = True, sync = True):
try:
prm = json.loads(prm)
except:
pass
if sync:
command = [{"command": "set_io", "prm": prm, "fulfill": fulfill}]
else:
command = [{"command": "set_io_async", "prm": prm, "fulfill": fulfill}]
return self.play(command, append)
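    # Usage sketch, assuming ``robot`` is a connected Dorna() instance and that
    # "out1" and "servo" are valid io keys (as suggested by the io() map defined
    # further below); the values are illustrative only:
    #
    #     robot.set_io({"out1": 1})                    # dict form, synchronous
    #     robot.set_io('{"servo": 500}', sync=False)   # JSON-string form, async command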
def set_io_backup(self, prm, fulfill = True, append = True):
try:
prm = json.loads(prm)
except:
pass
command = [{"command": "set_io", "prm": prm, "fulfill": fulfill}]
return self.play(command, append)
"""
prm:
dict
json-dict
append
True
False
"""
def move(self, prm, fulfill = True, append = True):
try:
prm = json.loads(prm)
except:
pass
command = [{"command": "move", "prm": prm, "fulfill": fulfill}]
return self.play(command, append)
"""
prm:
dict
json-dict
"""
def set_toolhead(self, prm):
try:
prm = json.loads(prm)
except:
pass
# mm to inch
if self._config["unit"]["length"] == "mm":
for k in prm:
prm[k] = self._mm_to_inch(prm[k])
command = [{"command": "set_toolhead", "prm": prm}]
result = self.play(command, False)
result = json.loads(result)
if len(result) == 0:
return self.toolhead()
wait = self._wait_for_command(result, time.time()+5)
#wait = self._wait_for_job(result, time.time(), 5)
if not wait:
return self.toolhead()
# update config
self._config["toolhead"]["x"] = prm["x"]
# update xyz
self._xyz = self._travel_to_xyz(np.copy(self._travel))
self._log_add(json.loads(self.position("xyz")), "xyz")
return self.toolhead()
"""
prm:
dict
json-dict
"""
def set_motion(self, prm):
try:
prm = json.loads(prm)
except:
pass
command = [{"command": "set_motion", "prm": prm}]
result = self.play(command, False)
result = json.loads(result)
if len(result) == 0:
return self.motion()
wait = self._wait_for_command(result, time.time()+5)
# update config in return
return self.motion()
"""
prm:
float
str-flaot
"""
def servo(self, prm, append = True):
try:
prm = json.loads(prm)
except:
pass
#command = [{"command": "servo", "prm": prm}]
command = [{"command": "set_io", "prm": {"servo": prm}}]
return self.play(command, append)
"""
prm:
binary
str-binary
"""
def laser(self, prm, append = True):
try:
prm = json.loads(prm)
except:
pass
#command = {"command": "laser", "prm":prm}
command = {"command": "set_io", "prm":{"laser": prm}}
return self.play(command, append)
"""
prm:
dict
json-dict
"""
def output(self, prm, append = True):
try:
prm = json.loads(prm)
except:
pass
#command = {"command": "output", "prm":prm}
command = {"command": "set_io", "prm":prm}
return self.play(command,append)
def halt(self):
self._flush_commands(True)
"""
prm:
dict
json-dict
"""
def gcode(self, prm, fulfill = True, append = True):
try:
prm = json.loads(prm)
except:
pass
command = [{"command": "gcode", "prm": prm, "fulfill": fulfill}]
return self.play(command, append)
"""
prm:
gcode = None, list, json list
gcode_path = None, string, json, list
"""
def play_gcode(self, gcode_path = None, gcode = None, **kwargs):
data = False
# open gcode_path
if gcode_path:
try:
with open(gcode_path, 'r') as f:
data = f.read().splitlines()
except:
data = False
# gcode: list, str, JSON,
if gcode:
# str to data (dict or list)
if type(gcode) == str:
try:
data = json.loads(gcode)
except:
data = False
if type(data) == dict:
data = [data]
elif type(data) == list:
                # every entry must be a dict or a str
                if any([type(j) != dict and type(j) != str for j in data]):
data = False
else:
data = False
try:
commands = [{"command": "g2core", "prm": d} for d in data]
except:
_rtn = {"error": 1 , "message": "not a valid input format"}
self._log_add(_rtn, "play_gcode")
return json.dumps(_rtn)
# xyz space
self.play({"command": "move", "prm": {"path": "line", "movement": 1, "x": 0}}, append = False)
return self.play(commands)
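    # Usage sketch, assuming ``robot`` is a connected Dorna() instance; the path
    # and gcode lines are illustrative only:
    #
    #     robot.play_gcode(gcode_path="/path/to/job.gcode")       # stream a file
    #     robot.play_gcode(gcode='["G0 X10 Y0", "G1 X0 Y0"]')     # JSON list of lines
    #
    # Each resulting entry is wrapped as a {"command": "g2core", "prm": ...}
    # command and queued through play().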
"""
limit rules:
always validate limit
if the device passes the limits only joint works
"""
class Dorna(_port_usb, easy_method):
def __init__(self, config_path = None):
super(Dorna, self).__init__()
# =================================================================
# print
# =================================================================
self._prnt = False
# =================================================================
# module name: "api", "dorna"
# =================================================================
self._mn = "dorna"
# =================================================================
# log
# =================================================================
self._log = None
# =================================================================
# max_read_error
# =================================================================
self._read_error = 10
# =================================================================
# number of axis
# =================================================================
self._axis = 5
# =================================================================
# decimal
# =================================================================
self._delta_e = 0.001
self._display_precision = 4
self._compute_precision = 16
# =================================================================
# utility
# =================================================================
# segment size
self._segment_size = 0.1
# dimension
self._bx = 3.759
self._bz = 8.111
self._l1 = 8.
self._l2 = 6.
# =================================================================
# variable
# =================================================================
# init variable
self._init_variable()
# =================================================================
# config
# =================================================================
# init config
if not config_path:
config_path = resource_filename(self._mn, 'config.yaml')
_printx(self._prnt,"config_path: ", config_path)
self._device["config"] = config_path
self._init_config()
# =================================================================
# thread
# =================================================================
# thread
self._stop = False
command_thread = threading.Thread(target = self._command_thread)
command_thread.start()
send_thread = threading.Thread(target = self._send)
send_thread.start()
receive_thread = threading.Thread(target = self._receive)
receive_thread.start()
# =================================================================
# update arduino
# =================================================================
def _baud(self, port_name):
platform = sys.platform
baud = []
if platform == "win32": # windows
baud.append("mode "+port_name+" BAUD=1200")
#bossac = "resources/windows/bossac --port="+port_name+" -U true -e -w -v -b "+bin_path+ " -R"
elif platform == "darwin": # mac
baud.append("stty -f "+port_name+" 1200")
else: # linux
baud.append("sudo stty -F "+port_name+" 1200 hup")
baud.append("sudo stty -F "+port_name+" 9600 hup")
# baud
try:
for cmd in baud:
time.sleep(0.5)
sp = Popen(cmd, shell=True, stdout=PIPE,stderr=PIPE, bufsize=1, universal_newlines=True)
sp.communicate()
return True
except Exception as ex:
_printx(self._prnt, ex)
return False
def _bossac(self, port_name, bin_path):
platform = sys.platform
if platform == "win32": # windows
bossac_path = resource_filename(self._mn, 'resources/windows/bossac')
bossac = [bossac_path, "-p", port_name, "-U", "true", "-e", "-w", "-v", "-b", bin_path ,"-R"]
elif platform == "darwin": # mac
bossac_path = resource_filename(self._mn, 'resources/mac/bossac')
bossac = ["sudo", bossac_path, "-U", "true", "-e", "-w", "-v", "-b", bin_path, "-R"]
self._bossac_exe(bossac_path)
else: # linux
bossac_path = "bossac"
if "/dev/" in port_name:
port_name = port_name[5:]
# shell is True: bossac = "sudo "+ bossac_path + " --port="+port_name+" -U true -e -w -v -i -b -R " + bin_path
bossac = ["sudo", bossac_path, "-p", port_name,"-U", "true", "-e", "-w", "-v", "-i", "-b", bin_path, "-R"]
# installing bossac
line_last = ""
self._log_add({"status": 1, "message": "Updating..."}, "update_firmware")
time.sleep(1)
try:
with Popen(bossac, shell=False, stdout=PIPE, bufsize=1, universal_newlines=True) as p:
for line in p.stdout:
line_last = line
_printx(self._prnt,line)
                    # check for the "(fff/fff)" pages pattern
if all([x in line for x in ["(","/"," pages)", "%"]]):
try:
# find the %
_index = line.find("%")
tmp_log = line[_index+1:]
# find (
_index = tmp_log.find("(")
tmp_log = tmp_log[_index+1:]
# find pages)
_index = tmp_log.find(" pages)")
tmp_log = tmp_log[0:_index]
# nom and denom
nom, denom = tmp_log.split("/")
nom = int(nom)
denom = int(denom)
_max_square = 32
_number_sqr = math.floor(_max_square*nom/denom)
_percentage = math.floor(100*nom/denom)
# percentage 8 characters: " 100%"
percentage_text = (8 - len(str(_percentage) + "%"))* " " +str(_percentage) + "%"
# square 35 characters: " |███████████████████████████ |"
square_text = " |" + _number_sqr*"\u2588" + (_max_square -_number_sqr )*" "+ "|"
# command 17 characters: " 108/111 commands"
_command_text = str(nom)+ "/"+str(denom)+ " pages"
_command_text = (17-len(_command_text))*" "+ _command_text
if nom != denom:
print(percentage_text,square_text,_command_text, end="\r")
else:
print(percentage_text,square_text,_command_text)
# log_add
#self._log_add({"status": 2, "nom": nom, "denom": denom}, "update_firmware")
except Exception as ex:
pass
except Exception as ex:
return False
if line_last.strip() in ["CPU reset.", "Set boot flash true"]:
return True
return False
def _bossac_reset(self, port_name):
platform = sys.platform
if platform == "win32": # windows
bossac_path = resource_filename(self._mn, 'resources/windows/bossac')
bossac = [bossac_path, "-p", port_name, "-b","-R"]
elif platform == "darwin": # mac
bossac_path = resource_filename(self._mn, 'resources/mac/bossac')
bossac = ["sudo", bossac_path,"-b", "-R"]
else: # linux
bossac_path = "bossac"
if "/dev/" in port_name:
port_name = port_name[5:]
bossac = ["sudo", bossac_path, "-p", port_name, "-b", "-R"]
# installing bossac
line_last = ""
self._log_add({"status": 1, "message": "Reseting..."}, "update_firmware")
time.sleep(1)
try:
with Popen(bossac, shell=False, stdout=PIPE, bufsize=1, universal_newlines=True) as p:
for line in p.stdout:
line_last = line
_printx(self._prnt,line)
                    # check for the "(fff/fff)" pages pattern
if all([x in line for x in ["(","/"," pages)", "%"]]):
try:
# find the %
_index = line.find("%")
tmp_log = line[_index+1:]
# find (
_index = tmp_log.find("(")
tmp_log = tmp_log[_index+1:]
# find pages)
_index = tmp_log.find(" pages)")
tmp_log = tmp_log[0:_index]
# nom and denom
nom, denom = tmp_log.split("/")
nom = int(nom)
denom = int(denom)
_max_square = 32
_number_sqr = math.floor(_max_square*nom/denom)
_percentage = math.floor(100*nom/denom)
# percentage 8 characters: " 100%"
percentage_text = (8 - len(str(_percentage) + "%"))* " " +str(_percentage) + "%"
# square 35 characters: " |███████████████████████████ |"
square_text = " |" + _number_sqr*"\u2588" + (_max_square -_number_sqr )*" "+ "|"
# command 17 characters: " 108/111 commands"
_command_text = str(nom)+ "/"+str(denom)+ " pages"
_command_text = (17-len(_command_text))*" "+ _command_text
if nom != denom:
print(percentage_text,square_text,_command_text, end="\r")
else:
print(percentage_text,square_text,_command_text)
# log_add
#self._log_add({"status": 2, "nom": nom, "denom": denom}, "update_firmware")
except Exception as ex:
pass
except Exception as ex:
return False
if line_last.strip() in ["CPU reset.", "Set boot flash true"]:
return True
return False
def _bossac_exe(self, bossac_path):
# form the command
bossac = ["sudo", "chmod", "+x", bossac_path]
# run the command
sp = Popen(bossac, shell=False, stdout=PIPE,stderr=PIPE, bufsize=1, universal_newlines=True)
out, err = sp.communicate()
out = out.splitlines()
err = err.splitlines()
for line in out + err:
_printx(self._prnt, line)
def reset_board(self, port_name = None):
# bin_path = "./resources/firmware/firmware.bin"
"""
baud
mac: stty -f port_name 1200
windows: mode port_name BAUD=1200
linux:
stty -F port_name 1200 hup
stty -F port_name 9600
"""
self._log_add({"status": 1, "message": "Progressing..."}, "update_firmware")
print("Progressing...")
num_try = 8
### disconnect first ###
self.disconnect()
time.sleep(1)
# check bossac exists
if sys.platform not in ["win32", "darwin"]: # linux
#sudo apt-get install bossa-cli
sp = Popen(["sudo", "bossac"], shell=False, stdout=PIPE,stderr=PIPE, bufsize=1, universal_newlines=True)
out, err = sp.communicate()
out = out.splitlines()
err = err.splitlines()
for line in out + err:
self._log_add({"status": 1, "message": line}, "update_firmware")
if "not found" in line:
# log
_rtn = {"status" : 100, "message": "You need to install BOSSA flash programming utility. Run: sudo apt-get install bossa-cli"}
self._log_add(_rtn, "update_firmware")
return json.dumps(_rtn)
# port name is given
if port_name:
result = json.loads(self._reset_board(port_name))
if not result["status"]:
# log
return result
else:
port_list = json.loads(self.port_list())
for port_name in port_list:
result = json.loads(self._reset_board(port_name))
if not result["status"]:
# log
return result
# log
_rtn = {"status" : 100, "message": "Reset failed"}
self._log_add(_rtn, "update_firmware")
return json.dumps(_rtn)
def _reset_board(self, port_name):
# port list
port_list_before = json.loads(self.port_list())
# port not found
if port_name not in port_list_before:
_rtn = {"status" : 100, "message": "USB port not found"}
self._log_add(_rtn, "update_firmware")
return json.dumps(_rtn)
# iteration
baud = False
num_try = 8
i = 0
while i < num_try and not baud:
i += 1
# time sleep
time.sleep(1)
# set port_baud
port_baud = port_name
port_list_after = json.loads(self.port_list())
if port_baud not in port_list_after:
difference_list = [x for x in port_list_after if x not in port_list_before]
if difference_list:
port_baud = difference_list[0]
else:
_rtn = {"status" : 100, "message": "Update failed"}
return json.dumps(_rtn)
# baud
baud = self._baud(port_baud)
if baud:
time.sleep(1)
# set port bossac
port_bossac = port_baud
port_list_bossac = json.loads(self.port_list())
if port_bossac not in port_list_bossac:
difference_list = [x for x in port_list_bossac if x not in port_list_after]
if difference_list:
port_bossac = difference_list[0]
else:
return json.dumps({"status" : 100, "message": "Reset failed"})
#bossac
#if self._bossac(port_bossac, firmware_path):
if self._bossac_reset(port_bossac):
# log
_rtn = {"status" : 0, "message": "Completed: board reseted on port "+ port_name + "."}
self._log_add(_rtn, "update_firmware")
return json.dumps(_rtn)
return json.dumps({"status" : 100, "message": "Reset failed"})
def update_firmware(self, port_name = None, firmware_path = None):
# bin_path = "./resources/firmware/firmware.bin"
"""
baud
mac: stty -f port_name 1200
windows: mode port_name BAUD=1200
linux:
stty -F port_name 1200 hup
stty -F port_name 9600
"""
self._log_add({"status": 1, "message": "Progressing..."}, "update_firmware")
print("Progressing...")
num_try = 8
### disconnect first ###
self.disconnect()
time.sleep(1)
### firmware_path
if not firmware_path:
firmware_path = resource_filename(self._mn, 'resources/firmware/firmware.bin')
# check bossac exists
if sys.platform not in ["win32", "darwin"]: # linux
#sudo apt-get install bossa-cli
sp = Popen(["sudo", "bossac"], shell=False, stdout=PIPE,stderr=PIPE, bufsize=1, universal_newlines=True)
out, err = sp.communicate()
out = out.splitlines()
err = err.splitlines()
for line in out + err:
self._log_add({"status": 1, "message": line}, "update_firmware")
if "not found" in line:
# log
_rtn = {"status" : 100, "message": "You need to install BOSSA flash programming utility. Run: sudo apt-get install bossa-cli"}
self._log_add(_rtn, "update_firmware")
return json.dumps(_rtn)
# port name is given
if port_name:
result = json.loads(self._update_firmware(port_name, firmware_path))
if not result["status"]:
# log
return result
else:
port_list = json.loads(self.port_list())
for port_name in port_list:
result = json.loads(self._update_firmware(port_name, firmware_path))
if not result["status"]:
# log
return result
# log
_rtn = {"status" : 100, "message": "Update failed"}
self._log_add(_rtn, "update_firmware")
return json.dumps(_rtn)
def _update_firmware(self, port_name, firmware_path):
# port list
port_list_before = json.loads(self.port_list())
# port not found
if port_name not in port_list_before:
_rtn = {"status" : 100, "message": "USB port not found"}
self._log_add(_rtn, "update_firmware")
return json.dumps(_rtn)
# iteration
baud = False
num_try = 8
i = 0
while i < num_try and not baud:
i += 1
# time sleep
time.sleep(1)
# set port_baud
port_baud = port_name
port_list_after = json.loads(self.port_list())
if port_baud not in port_list_after:
difference_list = [x for x in port_list_after if x not in port_list_before]
if difference_list:
port_baud = difference_list[0]
else:
_rtn = {"status" : 100, "message": "Update failed"}
return json.dumps(_rtn)
# baud
baud = self._baud(port_baud)
if baud:
time.sleep(1)
# set port bossac
port_bossac = port_baud
port_list_bossac = json.loads(self.port_list())
if port_bossac not in port_list_bossac:
difference_list = [x for x in port_list_bossac if x not in port_list_after]
if difference_list:
port_bossac = difference_list[0]
else:
return json.dumps({"status" : 100, "message": "Update failed"})
#bossac
#if self._bossac(port_bossac, firmware_path):
if self._bossac(port_bossac, firmware_path):
# log
_rtn = {"status" : 0, "message": "Completed: firmware updated on port "+ port_name+"."}
self._log_add(_rtn, "update_firmware")
return json.dumps(_rtn)
return json.dumps({"status" : 100, "message": "Update failed"})
# =================================================================
# all the public attr
# =================================================================
#??? add log or not
def device(self):
#tmp = dict(self._device)
tmp = copy.deepcopy(self._device)
if tmp["state"] != None:
tmp["state"] = math.ceil(tmp["state"])
# config
try:
tmp["config"] = os.path.abspath(tmp["config"])
except:
pass
return json.dumps(tmp)
def _print_percentage(self, nom, denom, new_line = False):
_max_square = 32
_number_sqr = math.floor(_max_square*nom/denom)
_percentage = math.floor(100*nom/denom)
# percentage 8 characters: " 100%"
percentage_text = (8 - len(str(_percentage) + "%"))* " " +str(_percentage) + "%"
# square 35 characters: " |███████████████████████████ |"
square_text = " |" + _number_sqr*"\u2588" + (_max_square -_number_sqr )*" "+ "|"
# command 17 characters: " 108/111 commands"
_command_text = str(nom)+ "/"+str(denom)+ " commands"
_command_text = (17-len(_command_text))*" "+ _command_text
if new_line:
print(percentage_text,square_text,_command_text)
else:
print(percentage_text,square_text,_command_text, end="\r")
def _connect_percentage(self, _init_nom, denom, command_list, max_time):
while len(command_list) and time.time() < max_time and self._device["connection"]:
try:
if command_list[0]["id"] <= self._system["command"][5][-1]["id"]:
_init_nom += 1
if _init_nom == denom:
self._print_percentage(_init_nom, denom, True)
else:
self._print_percentage( _init_nom, denom)
command_list.pop(0)
# add to the log
self._log_add({"nom":_init_nom, "denom": denom}, "connect_percentage")
else:
time.sleep(0.02)
except Exception as e:
time.sleep(0.02)
def _connect_percentage_backup(self, _init_nom, denom, command_list, max_time):
while len(command_list) and time.time() < max_time and self._device["connection"]:
if command_list[0]["id"] <= self._system["command"][5][-1]["id"]:
_init_nom += 1
if _init_nom == denom:
self._print_percentage(_init_nom, denom, True)
else:
self._print_percentage( _init_nom, denom)
command_list.pop(0)
# add to the log
self._log_add({"nom":_init_nom, "denom": denom}, "connect_percentage")
else:
time.sleep(0.02)
def _command_mask(self, command):
#_allowed_keys = ["id", "state", "error", "message", "command","prm","fulfill", "key"]
_state = [0, 0, 1,1,1,2]
_remove_key = ["travel_final", "gc", "display"]
_command = []
for c in command:
#x = dict(c)
x = copy.deepcopy(c)
if x["display"]:
for r in _remove_key:
x.pop(r, None)
# change state
x["state"] = _state[x["state"]]
_command.append(x)
return _command
def _command_by_id(self, id_list):
        # sanitize: accept only an int or a list of ints
if type(id_list) is not list:
id_list = [id_list]
# sort
id_list.sort()
# search
_command = []
command_index = 5
# every id is an integer
if any([type(x) != int for x in id_list]):
return _command
#_id = id_list.pop(0)
while id_list and command_index >= 0:
result = next((item for item in self._system["command"][command_index] if item["id"] == id_list[0]), None)
if result != None:
_command.append(result)
id_list.pop(0)
else:
command_index += -1
return _command
def _command_by_state(self, state_list):
        # sanitize
if type(state_list) is not list:
state_list = [state_list]
# sort
state_list.sort(reverse=True)
_command = []
for state in state_list:
_command += self._system["command"][state]
return _command
def command(self, prm):
# json
if type(prm) == str:
try:
prm = json.loads(prm)
except:
prm = False
_result = []
if type(prm) == dict and "id" in prm:
_result = self._command_by_id(prm["id"])
elif type(prm) == dict and "state" in prm:
_state = [[0,1], [2,3,4], [5]]
state = []
if type(prm["state"]) != list:
prm["state"] = [prm["state"]]
for s in prm["state"]:
try:
state += _state[s]
except:
pass
_result = self._command_by_state(state)
_result = self._command_mask(_result)
return json.dumps(_result)
def xyz_to_joint(self, xyz):
tmp_xyz =np.array(xyz[0:self._config["axis"]["number"]])
# unit
if self._config["unit"]["length"] == "mm":
for i in range(0,3):
tmp_xyz[i] = self._mm_to_inch(tmp_xyz[i])
# xyz to joint
return json.dumps(self._xyz_to_joint(tmp_xyz)["joint"].tolist())
def joint_to_xyz(self, joint):
tmp_joint = np.array(joint[0:self._config["axis"]["number"]])
tmp_xyz = self._joint_to_xyz(tmp_joint)
# unit
if self._config["unit"]["length"] == "mm":
for i in range(0,3):
tmp_xyz[i] = self._inch_to_mm(tmp_xyz[i])
return json.dumps(tmp_xyz.tolist())
def position(self, space = "joint"):
if space[0] == "j":
return json.dumps(self._joint[0:self._config["axis"]["number"]].tolist())
elif space[0] == "x":
# unit
if self._config["unit"]["length"] == "mm":
tmp_xyz = self._xyz[0:self._config["axis"]["number"]].tolist()
for i in range(0,3):
tmp_xyz[i] = self._inch_to_mm(tmp_xyz[i])
return json.dumps(tmp_xyz)
return json.dumps(self._xyz[0:self._config["axis"]["number"]].tolist())
rtn = np.array([None for _ in range(self._config["axis"]["number"])])
return json.dumps(rtn.tolist())
def homed(self):
rtn = {}
rtn["j0"] = int(self._home_robot["x"] == 1)
rtn["j1"] = int(self._home_robot["y"] == 1)
rtn["j2"] = int(self._home_robot["z"] == 1)
try:
if self._home_robot["a"]+self._home_robot["b"] == 2:
rtn["j3"] = 1
else:
rtn["j3"] = 0
except:
rtn["j3"] = None
rtn["j4"] = rtn["j3"]
return json.dumps(rtn)
def io(self):
#tmp = dict(self._io)
tmp = copy.deepcopy(self._io)
tmp["laser"] = tmp["out5"]
tmp.pop("out5", None)
return json.dumps(tmp)
def log_start(self, l):
self._log = l
self._log_id = 0
def log(self):
return json.dumps(self._log)
def _log_add(self,msg, _type = "info"):
# make sure it is list
if type(self._log) != list:
return None
self._log.append(json.dumps({"id": self._log_id, "time": time.time(), "type": _type, "message": msg}))
self._log_id += 1
def _init_variable(self):
# ???
self.limit_base = [-175,175]
# device_id
"""
connection => 0: disconnected, 1: connecting, 2: connected
        state => 0: stopped, 1: running, 0.5: stopping
"""
self._device = {"id": None, "connection": 0, "port": None, "fv": None, "config": None, "state": None, "version": "1.4.2"}
# travel
self._travel = np.array([None,None,None,None,None,None]) # 6 axis
self._joint = self._travel_to_joint(np.copy(self._travel))
self._xyz = self._travel_to_xyz(np.copy(self._travel))
# home j0 ,j1, j2, (j3,j4)
self._home_system = {"home": None, "homx": None, "homy": None, "homz": None, "homa": None, "homb": None}
self._home_robot = {"x": None, "y": None, "z": None, "a": None, "b": None, "c": None}
self._scale = {"speed": 0.5, "jerk": 0.5}
# io
self._io = {"out1": None, "out2": None , "out3": None, "out4": None,
"out5": None,
"in1": None, "in2":None, "in3": None, "in4":None,
"do1mo": None , "do2mo": None, "do3mo": None,"do4mo": None,
"di1mo": None, "di2mo": None,"di3mo": None, "di4mo": None,
"servo": None}
# servo
#self._srv = None
# system
self._system = {"lines_to_send": 4,
"qr": 48,
"travel_final" : None,
"command": [[], [], [], [] ,[], []],
"command_id": -1,
"valid_command_id": 0,
"gc": None,
"state": None,
"probe": {}
}
    # sanitize data and make sure it has the right format
def _sanitate_command(self, data):
# str to data (dict or list)
if type(data) == str:
try:
data = json.loads(data)
except:
data = False
if type(data) == dict:
data = [data]
elif type(data) == list:
if any([type(j) != dict for j in data]):
data = False
else:
data = False
return data
# format the commands
def _format_command(self, commands):
commands_tmp = []
for command in commands:
self._system["command_id"] += 1
command_tmp = {"id": self._system["command_id"],
"state": 0,
"error": None,
"message": None,
"travel_final": None,
"gc": [],
"command": command["command"],
"fulfill": True,
"key": None,
"display": True
}
# prm
if "prm" in command:
command_tmp["prm"] = command["prm"]
# fulfill
if "fulfill" in command:
command_tmp["fulfill"] = command["fulfill"]
# display
if "display" in command:
command_tmp["display"] = command["display"]
# key
if "key" in command:
command_tmp["key"] = command["key"]
commands_tmp.append(command_tmp)
return commands_tmp
def _port_close(self):
try:
self._port.close()
except:
pass
self._port = None
self._device["connection"] = 0
self._device["state"] = None
result = self.device()
# add log
self._log_add(json.loads(result), "device")
# return
return result
def terminate(self):
# make sure everything is finished
while self._device["state"]:
time.sleep(0.01)
self._port_close()
self._stop = True
def disconnect(self):
return self._port_close()
def connect(self,port_name = None, file_init = None): # port: open, send: startup commands
# open port
# send Id
# wait for it for 1 sec
# send the rest
# wait for end
# change flag to ready
#self._stop = False
### search for all ports ###
print("Progressing...")
if port_name:
return self._connect(port_name, file_init)
else:
port_all = json.loads(self.port_list())
for port in port_all:
result = self._connect(port, file_init)
result = json.loads(result)
if result["connection"] == 2:
#self.set_joint([0,0,0,0,0])
return self.device()
return self._port_close()
def _connect(self,port_name,file_init):
try:
"""
# linux sudo permission
if sys.platform not in ["win32" , "darwin"]:
check_output("sudo chmod 666 " + port_name, shell=True).decode()
"""
# linux sudo permission
if sys.platform != "win32":
#Popen(bossac, shell=True, stdout=PIPE, bufsize=1, universal_newlines=True)
#check_output("sudo chmod 777 " + port_name, shell=False).decode()
with Popen(["sudo", "chmod", "777", port_name], shell=False, stdout=PIPE,stderr=PIPE, bufsize=1, universal_newlines=True) as p:
pass
# initial port open failed
if not self._port_open(port_name):
return self._port_close()
# change to connecting status
self._device["connection"] = 1
job = [
[{"command": "g2core", "prm": "{line:n}"}, {"command": "g2core", "prm": "{id: n, fv:n}"}],
[{"command": "g2core", "prm": "{sr: n}"}],
[{"command": "set_toolhead", "prm": {"x": self._config["toolhead"]["x"]}}, {"command": "move", "prm":{"path": "joint", "movement": 1, "speed": self._config["default_speed"]["joint"], "j0": 0, "jerk": list(self._config["default_jerk"]["joint"])}},{"command": "g2core", "prm": "{tt32:n}"}, {"command": "set_motion", "prm": self._config["motion"]}]
]
# file init
if file_init:
try:
with open(file_init) as f:
content = f.readlines()
# you may also want to remove whitespace characters like `\n` at the end of each line
content = [x.strip() for x in content]
# form g2core
job.append([{"command": "g2core", "prm": "c"} for c in content])
except:
pass
# number of jobs
_init_num = [0]
for j in job:
_init_num.append(_init_num[-1] +len(j))
for i, j in enumerate(job):
result = self.play(j, True)
result = json.loads(result)
if type(result)!= list or len(result) == 0:
return self._port_close()
self._connect_percentage(_init_num[i], _init_num[-1], result, 5 + time.time() + 2*len(result))
# wait a little bit
_time = time.time()
while self._device["state"]!= 0 and time.time() < _time + 1:
time.sleep(0.005)
# make sure the state is 0
if self._device["state"] != 0:
return self._port_close()
self._device["connection"] =2
self._device["port"] = port_name
result = self.device()
self._log_add(json.loads(result), "device")
return result
except Exception as x:
_printx(self._prnt,"error: ",x)
return self._port_close()
def connect_backup(self,port_name = None): # port: open, send: startup commands
# open port
# send Id
# wait for it for 1 sec
# send the rest
# wait for end
# change flag to ready
#self._stop = False
### search for all ports ###
print("Progressing...")
if port_name:
return self._connect(port_name)
else:
port_all = json.loads(self.port_list())
for port in port_all:
result = self._connect(port)
result = json.loads(result)
if result["connection"] == 2:
return self.device()
return self._port_close()
def _connect_backup(self,port_name):
try:
"""
# linux sudo permission
if sys.platform not in ["win32" , "darwin"]:
check_output("sudo chmod 666 " + port_name, shell=True).decode()
"""
# linux sudo permission
if sys.platform != "win32":
#Popen(bossac, shell=True, stdout=PIPE, bufsize=1, universal_newlines=True)
#check_output("sudo chmod 777 " + port_name, shell=False).decode()
with Popen(["sudo", "chmod", "777", port_name], shell=False, stdout=PIPE,stderr=PIPE, bufsize=1, universal_newlines=True) as p:
pass
# initial port open failed
if not self._port_open(port_name):
return self._port_close()
# change to connecting status
self._device["connection"] = 1
job = [
[{"command": "g2core", "prm": "{line:n}"}, {"command": "g2core", "prm": "{id: n, fv:n}"}],
[{"command": "g2core", "prm": "{di1fn:4}"},{"command": "g2core", "prm": "{ej: 1}"},{"command": "g2core", "prm": "{jv:4}"},{"command": "g2core", "prm": "{sv:1}"},{"command": "g2core", "prm": "{si:200}"},{"command": "g2core", "prm": "{qv:2}"},{"command": "g2core", "prm": "{xam:1}"},{"command": "g2core", "prm": "{yam:1}"},{"command": "g2core", "prm": "{zam:1}"},{"command": "g2core", "prm": "{aam:1}"},{"command": "g2core", "prm": "{bam:1}"},{"command": "g2core", "prm": "{cam:1}"},{"command": "g2core", "prm": "{1:{sa:1.8}}"},{"command": "g2core", "prm": "{2:{sa:1.8}}"},{"command": "g2core", "prm": "{3:{sa:1.8}}"},{"command": "g2core", "prm": "{4:{sa:1.8}}"},{"command": "g2core", "prm": "{5:{sa:1.8}}"},{"command": "g2core", "prm": "{6:{sa:1.8}}"},{"command": "g2core", "prm": "{1:{tr:45}}"},{"command": "g2core", "prm": "{2:{tr:18}}"},{"command": "g2core", "prm": "{3:{tr:18}}"},{"command": "g2core", "prm": "{4:{tr:90}}"},{"command": "g2core", "prm": "{5:{tr:90}}"},{"command": "g2core", "prm": "{6:{tr:1.3535433}}"},{"command": "g2core", "prm": "{1:{mi:32}}"},{"command": "g2core", "prm": "{2:{mi:32}}"},{"command": "g2core", "prm": "{3:{mi:32}}"},{"command": "g2core", "prm": "{4:{mi:32}}"},{"command": "g2core", "prm": "{5:{mi:32}}"},{"command": "g2core", "prm": "{6:{mi:32}}"},{"command": "g2core", "prm": "{1:{ma:0}}"},{"command": "g2core", "prm": "{2:{ma:1}}"},{"command": "g2core", "prm": "{3:{ma:2}}"},{"command": "g2core", "prm": "{4:{ma:3}}"},{"command": "g2core", "prm": "{5:{ma:4}}"},{"command": "g2core", "prm": "{6:{ma:5}}"},{"command": "g2core", "prm": "{1:{po:0}}"},{"command": "g2core", "prm": "{2:{po:1}}"},{"command": "g2core", "prm": "{3:{po:0}}"},{"command": "g2core", "prm": "{4:{po:1}}"},{"command": "g2core", "prm": "{5:{po:1}}"},{"command": "g2core", "prm": "{6:{po:0}}"},{"command": "g2core", "prm": "{1:{pm:1}}"},{"command": "g2core", "prm": "{2:{pm:1}}"},{"command": "g2core", "prm": "{3:{pm:1}}"},{"command": "g2core", "prm": "{4:{pm:1}}"},{"command": "g2core", "prm": "{5:{pm:1}}"},{"command": "g2core", "prm": "{6:{pm:1}}"},{"command": "g2core", "prm": "{1:{pl:1.0}}"},{"command": "g2core", "prm": "{2:{pl:1.0}}"},{"command": "g2core", "prm": "{3:{pl:1.0}}"},{"command": "g2core", "prm": "{4:{pl:1.0}}"},{"command": "g2core", "prm": "{5:{pl:1.0}}"},{"command": "g2core", "prm": "{6:{pl:1.0}}"},{"command": "g2core", "prm": "{xtn:1}"},{"command": "g2core", "prm": "{xtm:1000}"},{"command": "g2core", "prm": "{xhi:6}"},{"command": "g2core", "prm": "{xhd:0}"},{"command": "g2core", "prm": "{xsv:4000}"},{"command": "g2core", "prm": "{xlv:500}"},{"command": "g2core", "prm": "{xlb:50}"},{"command": "g2core", "prm": "{xzb:69}"},{"command": "g2core", "prm": "{ytn:0}"},{"command": "g2core", "prm": "{ytm:420}"},{"command": "g2core", "prm": "{yhi:2}"},{"command": "g2core", "prm": "{yhd:0}"},{"command": "g2core", "prm": "{ysv:4000}"},{"command": "g2core", "prm": "{ylv:500}"},{"command": "g2core", "prm": "{ylb:50}"},{"command": "g2core", "prm": "{yzb:90}"},{"command": "g2core", "prm": "{ztn:100}"},{"command": "g2core", "prm": "{ztm:1500}"},{"command": "g2core", "prm": "{zhi:3}"},{"command": "g2core", "prm": "{zhd:1}"},{"command": "g2core", "prm": "{zsv:4000}"},{"command": "g2core", "prm": "{zlv:500}"},{"command": "g2core", "prm": "{zlb:50}"},{"command": "g2core", "prm": "{zzb:82}"},{"command": "g2core", "prm": "{atn:-1000}"},{"command": "g2core", "prm": "{btn:-1000}"},{"command": "g2core", "prm": "{atm:0}"},{"command": "g2core", "prm": "{btm:0}"},{"command": "g2core", "prm": "{ahi:4}"},{"command": "g2core", 
"prm": "{bhi:5}"},{"command": "g2core", "prm": "{ahd:1}"},{"command": "g2core", "prm": "{bhd:1}"},{"command": "g2core", "prm": "{asv:10000}"},{"command": "g2core", "prm": "{bsv:10000}"},{"command": "g2core", "prm": "{alv:5000}"},{"command": "g2core", "prm": "{blv:5000}"},{"command": "g2core", "prm": "{alb:30}"},{"command": "g2core", "prm": "{blb:30}"},{"command": "g2core", "prm": "{azb:60}"},{"command": "g2core", "prm": "{bzb:175}"},{"command": "g2core", "prm": "{xvm:30000}"},{"command": "g2core", "prm": "{yvm:30000}"},{"command": "g2core", "prm": "{zvm:30000}"},{"command": "g2core", "prm": "{avm:30000}"},{"command": "g2core", "prm": "{bvm:30000}"},{"command": "g2core", "prm": "{cvm:30000}"},{"command": "g2core", "prm": "{xfr:30000}"},{"command": "g2core", "prm": "{yfr:30000}"},{"command": "g2core", "prm": "{zfr:30000}"},{"command": "g2core", "prm": "{afr:30000}"},{"command": "g2core", "prm": "{bfr:30000}"},{"command": "g2core", "prm": "{cfr:30000}"}],
[{"command": "g2core", "prm": "{sr: n}"}],
[{"command": "set_toolhead", "prm": {"x": self._config["toolhead"]["x"]}}, {"command": "move", "prm":{"path": "joint", "movement": 1, "speed": self._config["default_speed"]["joint"], "j0": 0, "jerk": list(self._config["default_jerk"]["joint"])}},{"command": "g2core", "prm": "{tt32:n}"}, {"command": "set_motion", "prm": self._config["motion"]}]
]
job.pop(1)
# cumulative number of commands per job (used for the connection progress estimate)
_init_num = [0]
for j in job:
_init_num.append(_init_num[-1] +len(j))
for i, j in enumerate(job):
result = self.play(j, False)
result = json.loads(result)
if type(result)!= list or len(result) == 0:
return self._port_close()
self._connect_percentage(_init_num[i], _init_num[-1], result, 5 + time.time() + 2*len(result))
# wait a little bit
_time = time.time()
while self._device["state"]!= 0 and time.time() < _time + 1:
time.sleep(0.005)
# make sure the state is 0
if self._device["state"] != 0:
return self._port_close()
self._device["connection"] =2
self._device["port"] = port_name
result = self.device()
self._log_add(json.loads(result), "device")
return result
except Exception as x:
_printx(self._prnt,"error: ",x)
return self._port_close()
def set_joint(self, prm, append = False):
# joints are valid
if any(self._joint == None):
return None
# json or not
try:
prm = json.loads(prm)
except:
pass
# dictionary or list
if type(prm) == list: # list
if len(prm) > 6:
return None
prm_tmp = {"j"+str(i): prm[i] for i in range(len(prm))}
prm = prm_tmp
if type(prm) == dict:
if not all([x in ["j0", "j1", "j2", "j3", "j4", "j5"] for x in prm.keys()]):
return None
# not empty
if not prm:
return None
# set joint
command = [{"command": "set_joint", "prm": prm}]
result = self.play(command, append)
result = json.loads(result)
"""
if result["id"] == None:
return None
wait = self._wait_for_job(result, time.time(), 1)
if wait == "timeout":
return None
"""
if len(result) == 0:
return None
wait = self._wait_for_command(result, time.time()+1)
if not wait:
return None
# home robot
home_robot = []
if "j0" in prm:
home_robot.append("x")
if "j1" in prm:
home_robot.append("y")
if "j2" in prm:
home_robot.append("z")
if "j3" in prm or "j4" in prm:
home_robot.append("a")
home_robot.append("b")
if "j5" in prm:
home_robot.append("c")
# tt32
prm = {"tt32": {x: 1 for x in home_robot}}
prm = json.dumps(prm)
prm = prm.replace('"', "")
command = {"command": "g2core", "prm": prm}
result = self.play(command, False)
result = json.loads(result)
"""
if result["id"] == None:
return None
wait = self._wait_for_job(result, time.time(), 1)
if wait == "timeout":
return None
"""
if len(result) == 0:
return None
wait = self._wait_for_command(result, time.time()+1)
if not wait:
return None
# get the last tt32
command = {"command": "g2core", "prm": "{tt32:n}"}
result = self.play(command, False)
result = json.loads(result)
"""
if result["id"] == None:
return None
wait = self._wait_for_job(result, time.time(), 1)
if wait == "timeout":
return None
"""
if len(result) == 0:
return None
wait = self._wait_for_command(result, time.time()+1)
if not wait:
return None
return self.homed()
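# A minimal usage sketch for set_joint(), assuming `robot` is an already-connected
# instance of this class (`robot` is a hypothetical variable name). Both the dict
# and list forms below follow the validation performed above.
"""
# set j0..j2 explicitly with a dict
robot.set_joint({"j0": 0, "j1": 90, "j2": -90})
# equivalent list form: index i maps to "j<i>"
robot.set_joint([0, 90, -90])
# a JSON string is also accepted (json.loads() is attempted first)
robot.set_joint('{"j3": 0, "j4": 0}')
"""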
# args: j0, j1, j2, j3 or j4
# list or JSON list
def home(self, prm):
try:
prm = json.loads(prm)
except:
pass
# string or list
if type(prm) != list:
prm = [prm]
result = None
T = False
for joint in prm:
if joint in ["j3", "j4"]:
# Only home 3 and 4 once
if not T:
result = self._home_joint_3_4()
T = True
else:
result = self._home_joint(joint)
return result
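# A minimal usage sketch for home(), assuming `robot` is an already-connected
# instance (hypothetical variable name). A single joint name, a list, or a
# JSON-encoded list is accepted, as handled above.
"""
# home a single joint
robot.home("j0")
# home several; "j3" and "j4" trigger the combined 3/4 routine only once
robot.home(["j0", "j1", "j3", "j4"])
# JSON-encoded list form
robot.home('["j2"]')
"""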
# args: j0, j1, j2, j3 or j4
# list or JSON list
def home_backup(self, prm):
try:
prm = json.loads(prm)
except:
pass
# string or list
if type(prm) != list:
prm = [prm]
result = None
for joint in prm:
result = self._home_joint(joint)
return result
"""
# remove all the probes
{dixfn: 0}
add the probe
{di4fn: 4}
G38.3
Homing Procedure Notes
[Procedure originally came with api.py, but rewritten by Ben Snell,
@bensnell on github.com]
- Requires that the constant probe_offset is correctly set. This constant
is an additional float value defined in the config.yaml file. This value
is set like such:
...
homing:
probe_offset: 30.0
...
This is a constant offset to the probing distance for these joints.
This value is different from one robot to another, and depends
on how the magnets in the wrist are aligned during the
assembly process. The purpose of this constant is to ensure that
joint 3 homes to the same value every time. This constant ensures that
the values returned from probing are reliably offset each time
to result in the same homing calculations.
- Joint 3 homing is repeatable as long as j3 is pointed outward,
in the direction of its parent arm, within 90 degrees in
either direction. The outward direction of joint 3 looks like this:
___
_________________/ \__
_/___ | | |
<--- ---|-|__•__|-| | • | |
\________________| |_|
| |
| |
| |
- Joint 4 homing is repeatable as long as it is kept within 180
degrees of zero.
- In order for any homing procedure to work right now, neither of the
joint 3 or 4 red homing lights may be on
"""
def _home_joint_3_4(self):
_input = [3,4]
# set_joint
self.set_joint({"j3": 0, "j4": 0}, True)
# Get the probe offset
probe_offset = 0.0
if "homing" in self._config and "probe_offset" in self._config["homing"]:
tmp = self._config["homing"]["probe_offset"]
if type(tmp) == int or type(tmp) == float:
probe_offset = float(tmp)
else:
print("Probe offset was not defined correctly in \"config.yaml\"")
# remove all the probe inputs
for i in range(1,10):
command = "{di"+str(i)+"fn: 0}"
self.play({"command": "g2core", "prm":command}, append = True)
time.sleep(0.5)
# add di4fn: 4
self.play({"command": "g2core", "prm":"{di4fn: 4}"}, append = True)
time.sleep(0.5)
# probe toward j4 -> 360
_result = self.probe({"j"+str(_input[-1]): 360, "speed": 5000}, append = True)
if _result == None:
return None
_result = json.loads(_result)
t3 = _result[4] - probe_offset
# back to where it started
command = {"command": "move", "prm": {"path": "joint", "movement": 0, "j4": 0, "speed": 5000}}
result = self.play(command, False)
result = json.loads(result)
if len(result) == 0:
return None
wait = self._wait_for_command(result, time.time()+1000)
if not wait:
return None
# remove all the probe inputs
for i in range(1,10):
command = "{di"+str(i)+"fn: 0}"
self.play({"command": "g2core", "prm":command}, append = True)
time.sleep(0.5)
# add di5fn: 4
self.play({"command": "g2core", "prm":"{di5fn: 4}"}, append = True)
time.sleep(0.5)
# probe toward j4 -> -360
_result = self.probe({"j"+str(_input[-1]): -360, "speed": 5000}, append = True)
if _result == None:
return None
_result = json.loads(_result)
t4 = -_result[4] - probe_offset
# back to where it started
command = {"command": "move", "prm": {"path": "joint", "movement": 0, "j4": 0, "speed": 5000}}
result = self.play(command, False)
result = json.loads(result)
if len(result) == 0:
return None
wait = self._wait_for_command(result, time.time()+1000)
if not wait:
return None
# Calculate the joint offsets
# j3 will be in the range [-90, 90)
# j4 will be in the range [-180, 180)
j3 = wrap(-0.5 * (wrap(t3,-180,180) + wrap(t4,-180,180)) , -90, 90)
j4 = 0.5 * (wrap(t4,-180,180) - wrap(t3,-180,180))
# Apply the calibration offsets saved in the yaml file
return self.set_joint({"j3": self._config["calibrate"]["j3"] + j3, "j4": self._config["calibrate"]["j4"] + j4}, True)
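# A standalone sketch of the j3/j4 offset math used in _home_joint_3_4 above,
# kept as documentation only. The local wrap() below is an assumption that
# mirrors the project's wrap() utility (map an angle into the range [lo, hi)).
"""
def wrap(angle, lo, hi):
    span = hi - lo
    while angle < lo:
        angle += span
    while angle >= hi:
        angle -= span
    return angle

t3, t4 = 140.0, 100.0   # probe angles, already corrected by probe_offset (made-up values)
j3 = wrap(-0.5 * (wrap(t3, -180, 180) + wrap(t4, -180, 180)), -90, 90)
j4 = 0.5 * (wrap(t4, -180, 180) - wrap(t3, -180, 180))
print(j3, j4)           # 60.0 -20.0 for this made-up input
"""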
def _home_joint(self, joint):
if joint not in ["j0","j1", "j2", "j3", "j4"]:
return None
# homing
# job_1: home
command = {"command": "home", "prm": [joint]}
result = self.play(command, False)
result = json.loads(result)
if len(result) == 0:
return None
wait = self._wait_for_command(result, time.time()+120)
if not wait:
return None
# calibration
if joint in ["j3", "j4"]:
clb = {"j3": self._config["calibrate"]["j3"], "j4": self._config["calibrate"]["j4"]}
else:
clb = {joint: self._config["calibrate"][joint]}
# set_joint
return self.set_joint(clb)
def add_180(self):
if any(self._joint == None):
return None
joint = np.copy(self._joint)
if not self.set_joint({"j3": joint[3]+ 180, "j4": joint[4]+ 180}):
return None
return self.position("joint")
# prm = {"j0":1, "j2":}
def probe(self, prm, append = False):
# read json
try:
prm = json.loads(prm)
except:
pass
# probing
# job_1: probe
command = {"command": "probe", "prm": prm}
result = self.play(command, append)
result = json.loads(result)
if len(result) == 0:
return None
wait = self._wait_for_command(result, time.time()+360)
if not wait:
return None
# read probe
# job2
self._system["probe"]["e"] = 0
command = {"command": "g2core", "prm": "{prb:n}"}
result = self.play(command, False)
result = json.loads(result)
if len(result) == 0:
return None
wait = self._wait_for_command(result, time.time()+360)
if not wait:
return None
if self._system["probe"]["e"]:
try:
_probe_travel = [self._system["probe"][k] for k in ["x", "y", "z", "a", "b", "c"]]
_probe_joint = self._travel_to_joint(_probe_travel)
return json.dumps(_probe_joint[0:self._config["axis"]["number"]].tolist())
except Exception as ex:
pass
return None
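# A minimal usage sketch for probe(), assuming `robot` is an already-connected
# instance (hypothetical variable name) and the relevant digital input has
# already been configured as a probe (e.g. {"di4fn": 4}, as done in
# _home_joint_3_4 above).
"""
import json

result = robot.probe({"j4": 360, "speed": 5000})
if result is not None:
    joints_at_contact = json.loads(result)   # joint angles where the probe triggered
"""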
# prm = {"j0":1, "j2": ,... }
def calibrate(self, prm):
# robot is homed
home = list(json.loads(self.homed()).values())
if not all(home[0:5]):
return None
# current joint
joint = np.copy(self._joint)
# read json
try:
prm = json.loads(prm)
except:
pass
# dictionary or list
if type(prm) == list: # list
if len(prm) > 6:
return None
prm_tmp = {"j"+str(i): prm[i] for i in range(len(prm))}
prm = prm_tmp
if type(prm) == dict:
if not all([x in ["j0", "j1", "j2", "j3", "j4", "j5"] for x in prm.keys()]):
return None
# not empty
if not prm:
return None
# set_joint
if not self.set_joint(prm):
return None
"""
# load config file
with open(self._device["config"], 'r') as stream:
_config_tmp = yaml.load(stream)
"""
# update config
if "j0" in prm:
self._config["calibrate"]["j0"] = self._config["calibrate"]["j0"] + prm["j0"]- joint[0]
#_config_tmp["calibrate"]["j0"] = self._config["calibrate"]["j0"]
if "j1" in prm:
self._config["calibrate"]["j1"] = self._config["calibrate"]["j1"] + prm["j1"]- joint[1]
#_config_tmp["calibrate"]["j1"] = self._config["calibrate"]["j1"]
if "j2" in prm:
self._config["calibrate"]["j2"] = self._config["calibrate"]["j2"] + prm["j2"]- joint[2]
#_config_tmp["calibrate"]["j2"] = self._config["calibrate"]["j2"]
if "j3" in prm:
self._config["calibrate"]["j3"] = (self._config["calibrate"]["j3"] + prm["j3"]- joint[3])%360
#_config_tmp["calibrate"]["j3"] = self._config["calibrate"]["j3"]
if "j4" in prm:
self._config["calibrate"]["j4"] = (self._config["calibrate"]["j4"] + prm["j4"]- joint[4])%360
#_config_tmp["calibrate"]["j4"] = self._config["calibrate"]["j4"]
"""
# save calibrate
with open(self._device["config"], 'w') as yaml_file:
yaml.dump(_config_tmp, yaml_file, default_flow_style=False)
"""
# add to log
self.config()
return self.position("joint")
def calibrate_backup(self, prm):
# robot is homed
home = list(json.loads(self.homed()).values())
if not all(home[0:5]):
return None
# current joint
joint = np.copy(self._joint)
# read json
try:
prm = json.loads(prm)
except:
pass
# dictionary or list
if type(prm) == list: # list
if len(prm) > 6:
return None
prm_tmp = {"j"+str(i): prm[i] for i in range(len(prm))}
prm = prm_tmp
if type(prm) == dict:
if not all([x in ["j0", "j1", "j2", "j3", "j4", "j5"] for x in prm.keys()]):
return None
# not empty
if not prm:
return None
# check prm and validate
if "j0" in prm and any([prm["j0"] > self.limit_base[1], prm["j0"] < self.limit_base[0]]):
return None
if "j1" in prm and any([prm["j1"] > self.limit_base[1], prm["j1"] < self.limit_base[0]]):
return None
if "j2" in prm and any([prm["j2"] > self.limit_base[1], prm["j2"] < self.limit_base[0]]):
return None
# set_joint
if not self.set_joint(prm):
return None
# update config
if "j0" in prm:
self._config["calibrate"]["j0"] = self._config["calibrate"]["j0"] + prm["j0"]- joint[0]
if "j1" in prm:
self._config["calibrate"]["j1"] = self._config["calibrate"]["j1"] + prm["j1"]- joint[1]
if "j2" in prm:
self._config["calibrate"]["j2"] = self._config["calibrate"]["j2"] + prm["j2"]- joint[2]
if "j3" in prm:
self._config["calibrate"]["j3"] = (self._config["calibrate"]["j3"] + prm["j3"]- joint[3])%360
if "j4" in prm:
self._config["calibrate"]["j4"] = (self._config["calibrate"]["j4"] + prm["j4"]- joint[4])%360
# add to log
self.config()
return self.position("joint")
# return: True False
def _wait_for_command(self, command_list, max_time):
while len(command_list) and time.time() < max_time and self._device["connection"]:
if command_list[0]["id"] <= self._system["command"][5][-1]["id"]:
command_list.pop(0)
else:
time.sleep(0.02)
if len(command_list):
return False
return True
def _append_commands(self,commands):
command_id_list = list(range(self._system["command_id"]+1, self._system["command_id"]+len(commands)+1))
# append M2
commands += [{"command": "g2core", "prm": "M2", "display": False}]
f_commands = self._format_command(commands)
# add commands
self._system["command"][0] += f_commands
# add to log
#_cmd_log = [{"id": cmd["id"],"key": cmd["key"], "state": cmd["state"]} for cmd in f_commands]
self._log_add( [{"id": cmd["id"],"key": cmd["key"], "state": cmd["state"]} for cmd in f_commands], "line_update")
# change device state to running
self._device["state"] = 1
# add to log
#result = self.device()
self._log_add(json.loads(self.device()), "device")
return command_id_list
def _flush_commands(self, halt = True):
if halt:
# valid commands id
self._system["valid_command_id"] = self._system["command_id"] + 1
# copy ???
#_command_list = list(self._system["command"][0:3])
_command_list = list(self._system["command"][0:4])
# clean ???
#self._system["command"][0:3] = [[], [], []]
self._system["command"][0:4] = [[], [], [], []]
_result = self.play({"command": "halt", "display": False})
_time = time.time()
while self._device["state"] != 0 and time.time() < _time + 1:
time.sleep(0.001)
# send jobs to log
self._log_add([{"id": cmd["id"], "key":cmd["key"], "state":-1} for X in _command_list for cmd in X], "line_update" )
"""
???
for _command in _command_list:
_command = self._command_mask(_command)
for _cmd in _command:
self._log_add(json.dumps(_cmd), "halt_command")
"""
# update travel final
if self._device["state"] == 0:
self._system["travel_final"] = np.copy(self._travel)
return True
self._log_add(True, "halt")
else:
# copy
_command_list = list(self._system["command"][0:2])
# clean
self._system["command"][0:2] = [[], []]
# update travel final
if self._system["command"][2]:
self._system["travel_final"] = np.copy(self._system["command"][2][0]["travel_final"])
elif self._system["command"][3]:
self._system["travel_final"] = np.copy(self._system["command"][3][0]["travel_final"])
elif self._system["command"][4]:
self._system["travel_final"] = np.copy(self._system["command"][4][0]["travel_final"])
elif self._system["command"][5]:
self._system["travel_final"] = np.copy(self._system["command"][5][0]["travel_final"])
else:
self._system["travel_final"] = np.copy(self._travel)
# add new job
self.play({"command": "g2core", "prm": "{id:n}", "display": False})
# add it to log
self._log_add([{"id": cmd["id"], "key":cmd["key"], "state":-1} for X in _command_list for cmd in X], "line_update" )
"""
???
# send jobs to log
for _command in _command_list:
_command = self._command_mask(_command)
for _cmd in _command:
self._log_add(json.dumps(_cmd), "pause_command")
self._log_add(True, "pause")
"""
return _command_list
return False
"""
append: append the commands to the last job
if the job is close to complete then it will wait for it and submit it as a new job
fulfill
[{"command_id": len(commands), "job_id":self._job["id"], "command": "g2core", "prm": "M2" + " n "+ str(2* self._job["id"] + 1)}]
"""
def play(self, commands, append = True):
# connection exists
if self._device["connection"] == 0:
_rtn = {"error": 1 , "message": "the device is not connected"}
self._log_add(_rtn, "play")
return json.dumps(_rtn)
# sanitize the incoming commands
commands = self._sanitate_command(commands)
# key
try:
key = commands[0]["key"]
except:
key = None
if not commands:
_rtn = {"error": 2 , "message": "not a valid format", "key": key}
self._log_add(_rtn, "play")
return json.dumps(_rtn)
# system is stopped == 0
# system is running == 1
"""
Find the last M2 and remove it
"""
# update travel
if self._device["state"] == 0:
self._system["travel_final"] = np.copy(self._travel)
if append or self._device["state"] == 0:
id_list = self._append_commands(commands)
#return self.command(id = id_list)
data = self.command({"id": id_list})
return data
else:
_flush_result = self._flush_commands()
if _flush_result:
return self.play(commands)
_rtn = {"error": 3 , "message": "timeout, try again", "key": key}
return json.dumps(_rtn)
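# A minimal usage sketch for play(), assuming `robot` is an already-connected
# instance (hypothetical variable name). A single command dict or a list of
# commands is accepted; append=True queues onto the running job, append=False
# flushes the queue first (see _flush_commands).
"""
import json

cmd = {"command": "move",
       "prm": {"path": "joint", "movement": 1, "j0": 10, "speed": 1000}}
result = json.loads(robot.play(cmd, append=True))
# on success: a JSON list describing the queued command(s)
# on failure: a dict such as {"error": 1, "message": "the device is not connected"}
"""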
"""
empty commands 0, 1, 2
"""
def pause(self):
return json.dumps(self._flush_commands(False))
"""
if error and not fulfill then do not run that command
# command[0] process and moves to command[1]
"""
def _command_thread(self):
while not self._stop:
self._command_compile()
time.sleep(0.001)
"""
Every single command is a dict
If there is any error, we send it all to the canceled commands
self._system["command"][6]
"""
def _command_compile(self):
# not disconnected
if self._device["connection"] == 0:
return False
# get the last command[0]
try:
#_command = dict(self._system["command"][0][0])
_command = copy.deepcopy(self._system["command"][0][0])
if "prm" in _command:
_prm = copy.deepcopy(_command["prm"])
else:
_prm = False
except:
return False
"""
# make sure it is synced
# if not synced then this command is already in the canceled
if _command and _command["id"] < self._system["valid_command_id"]:
return False
"""
# json command to gc_list
gcs = self._json_to_method(_command) #{'gc_list', 'travel_final', "status", "message"}
# system is synced
# if not synced then this command is already in the canceled
if _command["id"] == self._system["command"][0][0]["id"]:
self._system["command"][0].pop(0)
else:
# add to log ???
self._log_add([{"id": _command["id"], "key": _command["key"], "state": -1}], "line_update")
return False
# wrong format: just ignore the command and add it to the canceled
if type(gcs) is not dict:
# modify command
#_command["state"] = 6
_command["error"] = 1
_command["message"] = "wrong format"
# add it to canceled
#self._system["command"][6].append(_command)
#self._log_add(_command, "ignore") ???
# add to log ???
self._log_add([{"id": _command["id"], "key": _command["key"], "state": -1}], "line_update")
return False
# state modification
_command["state"] = 1
# message
if "message" in gcs:
_command["message"] = gcs["message"]
"""
error
if there is an error and fulfill is false
then send everything in command 0 to the canceled
"""
if "status" in gcs:
_command["error"] = gcs["status"]
if not _command["fulfill"] and _command["error"]:
# add other commands except M2
commands = [_command] + list(self._system["command"][0][0:-1])
self._system["command"][0]= self._system["command"][0][-1]
for command in commands:
command["message"] = "canceled because of an error in the command with the id : " + str(_command["id"])
#_cmd_log = [{"id": cmd["id"], "key": cmd["key"], "state": -1} for cmd in commands]
self._log_add( [{"id": cmd["id"], "key": cmd["key"], "state": -1} for cmd in commands], "line_update")
return False
"""
???
# add _command
#_command["state"] = 6
#self._system["command"][6].append(_command)
self._log_add(_command, "error")
# add other commands except M2
commands = list(self._system["command"][0][0:-1])
self._system["command"][0]= self._system["command"][0][0:-1]
for command in commands:
#command["state"] = 6
command["message"] = "canceled because of an error in the command with the id : " + str(_command["id"])
#self._system["command"][6].append(command)
self._log_add(command, "error")
"""
# no error
if "gc_list" in gcs:
# not empty
_command["gc"] = self._method_to_gc(gcs["gc_list"], _command["id"])
if "travel_final" in gcs:
_command["travel_final"] = gcs["travel_final"]
self._system["travel_final"] = gcs["travel_final"]
# append to list 1
if "prm" in _command:
_command["prm"] = _prm
self._system["command"][1].append(_command)
# add to log ???
self._log_add([{"id": _command["id"], "key": _command["key"], "state": _command["state"]}], "line_update")
return True
def _send(self):
_gc = True
while not self._stop: # main loop
if self._device["connection"] > 0:
# still gcodes in the list
try:
if type(_gc) != dict:
# get gcode from the list
_gc = {"id": self._system["command"][2][0]["id"],
"gc": self._system["command"][2][0]["gc"].pop(0)
}
_gc = self._process_gc(_gc)
except:
# move the command from 2 to 3
try:
_command = self._system["command"][2].pop(0)
_command["state"] = 3
self._system["command"][3] += [_command]
self._log_add([{"id": _command["id"], "key": _command["key"], "state": _command["state"]}], "line_update")
except:
pass
# move the command from 1 to 2
try:
_command = self._system["command"][1].pop(0)
_command["state"] = 2
self._system["command"][2] += [_command]
self._log_add([{"id": _command["id"], "key": _command["key"], "state": _command["state"]}], "line_update")
except:
pass
time.sleep(0.001)
def _receive(self):
_error = 0
while not self._stop: # main loop
if self._device["connection"] > 0:
try:
rsp = self._port_read()
if rsp:
self._process_response(rsp)
_error = 0
except Exception as ex:
_printx(self._prnt,"test_3", ex)
_error += 1
if _error > self._read_error:
self._port_close()
_error = 0
time.sleep(.005)
def _method_to_gc(self, gc_list, command_id = None):
# send_list
_send_list = []
for gc in gc_list:
if type(gc) is str:
_send_list.append(gc)
elif type(gc) is dict:
_send_list.append(json.dumps(gc))
# add line
if _send_list and command_id is not None:
if "{" in _send_list[-1]: # dictionary format
_send_list.append(" n "+ str(2* command_id + 2))
elif all([x not in _send_list[-1] for x in [" n ", "!", "%", "~"]]):
_send_list[-1] += " n "+ str(2* command_id + 2)
return _send_list
def _json_to_method(self, command):
try:
if "prm" in command:
return getattr(self, "_"+command['command'])(command['prm'])
else:
return getattr(self, "_"+command['command'])()
except Exception as x:
_printx(self._prnt,x)
return False
"""
with every command send change the device state to 1
"""
def _process_gc(self, gc):
# check format
if type(gc) != dict or "gc" not in gc or "id" not in gc:
return False
# system is synced
if gc["id"] < self._system["valid_command_id"]:
return False
# ignore m2
if "M2" in gc["gc"] and len(self._system["command"][0]) + len(self._system["command"][1]):
return False
"""
# system is busy
if not all([self._system["lines_to_send"] > 0, self._system["qr"] > 40]):
return gc
"""
# system is busy
if gc["gc"] in ["!" , "%"]:
pass
elif not all([self._system["lines_to_send"] > 0, self._system["qr"] > 40]):
return gc
# send command
_printx(self._prnt,"send: ", gc["gc"])
# add log
self._log_add(gc["gc"], "send")
send_command = gc["gc"] + '\n'
self._port.write(send_command.encode())
# update line and line to send
line = [x for x in gc["gc"].split('\n') if x]
self._system["lines_to_send"] -= len([x for x in line if x not in ['%', '~', '!']])
if gc["gc"] == "%":
self._system["lines_to_send"] = 4
_printx(self._prnt,"lines to send: ", self._system["lines_to_send"])
# sleep
sleep = 0.35 - 0.00625*self._system["qr"]
_printx(self._prnt,"sleep: :", sleep)
time.sleep(sleep)
return True
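# Worked example of the throttle formula above: the sleep between sends shrinks
# as the planner queue report qr approaches its empty value of 48.
"""
for qr in (16, 32, 40, 48):
    print(qr, round(0.35 - 0.00625 * qr, 4))   # 0.25, 0.15, 0.1, 0.05
"""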
def _process_response(self, response):
# response
if 'r' in response:
self._system["lines_to_send"] = min(4, self._system["lines_to_send"] + 1)
#_r = dict(response["r"])
_r = copy.deepcopy(response["r"])
if "f" in response:
_r["f"] = response["f"]
self._process_response(_r)
return True
# status report
if 'sr' in response:
# we should find the position updates if there is any
#_r = dict(response["sr"])
_r = copy.deepcopy(response["sr"])
self._process_response(_r)
return True
_printx(self._prnt,"receive: ", response)
# add to log
self._log_add(response, "receive")
# qr update
self._qr_update(response)
# travel update
self._travel_update(response)
# id
self._id_update(response)
# probe
self._probe_update(response)
# gpa, jt, ct
self._motion_update(response)
# homing
self._home_update(response)
# firmware version
self._fv_update(response)
# io update
self._io_update(response)
# id
self._line_update(response)
# end of script
self._end_of_script(response)
def _qr_update(self, response):
if 'qr' in response and type(response["qr"]) == int:
self._system["qr"] = response["qr"]
# update line to send
if "qi" in response and "qo" in response:
if response["qr"] == 48 and response["qi"] == 0 and response["qo"] == 0:
self._system["lines_to_send"] = 4
def _id_update(self, response):
if "id" in response:
self._device["id"] = response["id"]
def _probe_update(self, response):
if "prb" in response:
for k in response["prb"]:
self._system["probe"][k] = response["prb"][k]
def _motion_update(self, response):
if "gpa" in response and type(response["gpa"]) != str:
self._config["motion"]["gpa"] = response["gpa"]
if "jt" in response and type(response["jt"]) != str:
self._config["motion"]["jt"] = response["jt"]
if "ct" in response and type(response["ct"]) != str:
self._config["motion"]["ct"] = response["ct"]
def _fv_update(self, response):
if "fv" in response:
self._device["fv"] = response["fv"]
def _io_update(self, response):
change = False
# this is when outputs are not in response
if "do6mo" in response:
change = True
self._io["out1"] = response["do6mo"]
if "do7mo" in response:
change = True
self._io["out2"] = response["do7mo"]
if "do8mo" in response:
change = True
self._io["out3"] = response["do8mo"]
if "do9mo" in response:
change = True
self._io["out4"] = response["do9mo"]
if "do10mo" in response:
change = True
self._io["out5"] = response["do10mo"]
###
if "out1" in response:
change = True
self._io["out1"] = response["out1"]
if "out2" in response:
change = True
self._io["out2"] = response["out2"]
if "out3" in response:
change = True
self._io["out3"] = response["out3"]
if "out4" in response:
change = True
self._io["out4"] = response["out4"]
if "out5" in response:
change = True
self._io["out5"] = response["out5"]
if "do1mo" in response:
change = True
self._io["do1mo"] = response["do1mo"]
if "do2mo" in response:
change = True
self._io["do2mo"] = response["do2mo"]
if "do3mo" in response:
change = True
self._io["do3mo"] = response["do3mo"]
if "do4mo" in response:
change = True
self._io["do4mo"] = response["do4mo"]
if "in1" in response:
change = True
self._io["in1"] = response["in1"]
if "in7" in response:
change = True
self._io["in2"] = response["in7"]
if "in8" in response:
change = True
self._io["in3"] = response["in8"]
if "in9" in response:
change = True
self._io["in4"] = response["in9"]
if "di1mo" in response:
change = True
self._io["di1mo"] = response["di1mo"]
if "di7mo" in response:
change = True
self._io["di2mo"] = response["di7mo"]
if "di8mo" in response:
change = True
self._io["di3mo"] = response["di8mo"]
if "di9mo" in response:
change = True
self._io["di4mo"] = response["di9mo"]
if change:
self._log_add(json.loads(self.io()), "io")
def _travel_update(self, response):
mapping = {"posx": 0, "posy": 1, "posz": 2, "posa": 3, "posb": 4, "posc": 5}
joint = list(set(mapping.keys()) & set(response.keys()))
# update travel
for j in joint:
self._travel[mapping[j]] = response[j]
# update joint xyz
if joint:
try:
self._joint = self._travel_to_joint(np.copy(self._travel))
self._xyz = self._travel_to_xyz(np.copy(self._travel))
# add to log
self._log_add(json.loads(self.position()), "joint")
self._log_add(json.loads(self.position("xyz")), "xyz")
except:
pass
def _end_of_script(self, response):
if "stat" in response:
# update stat
self._system["state"] = response["stat"]
if response["stat"] == 4 and self._device["state"] == 0.5:
self._device["state"] = 0
self._log_add(json.loads(self.device()), "device")
def _line_update(self, response):
_command_update = []
if "line" in response:
if response["line"]%2 == 0:
command_id = (response["line"]-2)/2
# check for completing
if command_id == self._system["command_id"]:
self._device["state"] = 0.5
self._log_add(json.loads(self.device()), "device")
# 4 to 5
try:
while self._system["command"][4][0]["id"] < command_id:
_command = self._system["command"][4].pop(0)
_command["state"] = 5
self._system["command"][5].append(_command)
# ??? limit length
self._system["command"][5] = self._system["command"][5][-100:]
# add to log
_command_update.append({"id": _command["id"], "state": 5,"key": _command["key"]})
except:
pass
# 3 to 5
try:
while self._system["command"][3][0]["id"] < command_id:
_command = self._system["command"][3].pop(0)
_command["state"] = 5
self._system["command"][5].append(_command)
# ??? limit length
self._system["command"][5] = self._system["command"][5][-100:]
# add to log
_command_update.append({"id": _command["id"], "state": 5, "key": _command["key"]})
except:
pass
# 3 to 4
try:
while self._system["command"][3][0]["id"] == command_id:
_command = self._system["command"][3].pop(0)
_command["state"] = 4
self._system["command"][4].append(_command)
# add to log
_command_update.append({"id": _command["id"], "state": 4, "key": _command["key"]})
except:
pass
if _command_update:
self._log_add(_command_update, "line_update")
def _home_update(self, response):
# update home_robot
update = False
if "tt32" in response:
home_key = list(set(response["tt32"].keys()) & set(self._home_robot.keys()))
_printx(self._prnt,"tt32: ", home_key)
for k in home_key:
update = True
self._home_robot[k] = response["tt32"][k]
# add to log
if update:
self._log_add(json.loads(self.homed()), "homed")
# update home_system
home_key = list(set(response.keys()) & set(self._home_system))
for k in home_key:
self._home_system[k] = response[k]
# =================================================================
# configuration
# =================================================================
"""
if ["unit"]["length"] == "mm"
bring everything to inch
"""
def _init_config(self):
# Read YAML file
with open(self._device["config"], 'r') as stream:
self._config = yaml.load(stream, Loader=yaml.SafeLoader)
if self._config["unit"]["length"] == "mm":
# speed_xyz
self._config["default_speed"]["xyz"] = self._mm_to_inch(self._config["default_speed"]["xyz"])
# jerk_xyz
self._config["default_jerk"]["xyz"] = self._jerk_mm_to_inch(self._config["default_jerk"]["xyz"])
# toolhead
for k in self._config["toolhead"]:
self._config["toolhead"][k] = self._mm_to_inch(self._config["toolhead"][k])
def save_config(self,save_path = None):
if save_path:
#self._config_path = save_path
self._device["config"] = save_path
with open(self._device["config"], 'w') as yaml_file:
_config = self.config()
_config = json.loads(_config)
yaml.dump(_config, yaml_file, default_flow_style=False)
# add log
result = self.device()
self._log_add(json.loads(result), "device")
return result
def scale(self):
self._log_add(self._scale, "scale")
return json.dumps(self._scale)
def set_scale(self , prm):
try:
prm = json.loads(prm)
except:
pass
if type(prm) is dict and "speed" in prm and 0 < prm["speed"] <= 1:
self._scale["speed"] = prm["speed"]
if type(prm) is dict and "jerk" in prm and 0 < prm["jerk"] <= 1:
self._scale["jerk"] = prm["jerk"]
# modify
return self.scale()
def config(self, prm = None):
try:
prm = json.loads(prm)
except:
pass
# copy
tmp_config = copy.deepcopy(self._config)
# length
tmp_config["default_jerk"]["joint"] = tmp_config["default_jerk"]["joint"][0:tmp_config["axis"]["number"]]
tmp_config["default_jerk"]["xyz"] = tmp_config["default_jerk"]["xyz"][0:tmp_config["axis"]["number"]]
# mm
if tmp_config["unit"]["length"] == "mm":
# speed_xyz
tmp_config["default_speed"]["xyz"] = self._inch_to_mm(tmp_config["default_speed"]["xyz"])
# jerk_xyz
tmp_config["default_jerk"]["xyz"] = self._jerk_inch_to_mm(tmp_config["default_jerk"]["xyz"])
# toolhead
for k in tmp_config["toolhead"]:
tmp_config["toolhead"][k] = self._inch_to_mm(tmp_config["toolhead"][k])
# display keys
if prm:
_display_key = [k for k in tmp_config if k in prm]
_rtn = {k: tmp_config[k] for k in _display_key}
self._log_add(_rtn, "config")
return json.dumps(_rtn)
self._log_add(tmp_config, "config")
return json.dumps(tmp_config)
def axis(self):
return self.config(["axis"])
def set_axis(self , prm):
try:
prm = json.loads(prm)
except:
pass
if type(prm) is dict and "number" in prm and prm["number"] in [5, 6]:
self._config["axis"]["number"] = prm["number"]
# modify
return self.axis()
def unit(self):
#_rtn = self.config(["unit"])
#self._log_add(json.loads(_rtn), "unit")
return self.config(["unit"])
def set_unit(self , prm):
try:
prm = json.loads(prm)
except:
pass
if type(prm) is dict and "length" in prm and prm["length"] in ["inch", "mm"]:
self._config["unit"]["length"] = prm["length"]
# modify
return self.config()
def motion(self):
return self.config(["motion"])
#self._log_add(result, "motion")
#return result
def default_speed(self):
return self.config(["default_speed"])
def set_default_speed(self , prm):
try:
prm = json.loads(prm)
except:
pass
# update config
if "joint" in prm:
self._config["default_speed"]["joint"] = prm["joint"]
if "xyz" in prm:
if self._config["unit"]["length"] == "mm":
self._config["default_speed"]["xyz"] = self._mm_to_inch(prm["xyz"])
else:
self._config["default_speed"]["xyz"] = prm["xyz"]
return self.default_speed()
def default_jerk(self):
return self.config(["default_jerk"])
def set_default_jerk(self , prm):
try:
prm = json.loads(prm)
except:
pass
# update config
if "joint" in prm:
if type(prm["joint"]) == list and len(prm["joint"]) == self._config["axis"]["number"]:
self._config["default_jerk"]["joint"][0:self._config["axis"]["number"]] = prm["joint"]
if "xyz" in prm:
if type(prm["xyz"]) == list and len(prm["xyz"]) == self._config["axis"]["number"]:
if self._config["unit"]["length"] == "mm":
self._config["default_jerk"]["xyz"][0:self._config["axis"]["number"]] = self._jerk_mm_to_inch(prm["xyz"])
else:
self._config["default_jerk"]["xyz"][0:self._config["axis"]["number"]] = prm["xyz"]
return self.default_jerk()
def _inch_to_mm(self, x):
if x == None:
return None
return x * 25.4
def _mm_to_inch(self, x):
if x == None:
return None
return x/25.4
def _jerk_mm_to_inch(self, jerk):
# xyz
_jerk_tmp = [self._mm_to_inch(x) for x in jerk[0:3]]
# abc
_jerk_tmp += jerk[3:]
return _jerk_tmp
def _jerk_inch_to_mm(self, jerk):
# xyz
#_jerk_tmp = [self._mm_to_inch(x) for x in jerk[0:3]]
_jerk_tmp = [self._inch_to_mm(x) for x in jerk[0:3]]
# abc
_jerk_tmp += jerk[3:]
return _jerk_tmp
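# Round-trip sketch for the unit helpers above: lengths are stored internally in
# inches and converted at the API boundary when the configured unit is mm.
"""
x_mm = 100.0
x_inch = x_mm / 25.4                       # what _mm_to_inch does
assert abs(x_inch * 25.4 - x_mm) < 1e-9    # _inch_to_mm restores the value
# _jerk_mm_to_inch only converts the first three (x, y, z) entries; a, b, c
# are angular and pass through unchanged.
"""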
"""
limit {"j0": [min, max], "j1": [min, max],...}
if something is None then we ignore it
"""
def set_limit(self, limit):
# json
try:
limit = json.loads(limit)
except:
pass
if "j0" in limit and len(limit["j0"]) == 2 and limit["j0"][0]<= limit["j0"][1] :
self._config["limit"]["j0"] = limit["j0"]
if "j1" in limit and len(limit["j1"]) == 2 and limit["j1"][0]<= limit["j1"][1] :
self._config["limit"]["j1"] = limit["j1"]
if "j2" in limit and len(limit["j2"]) == 2 and limit["j2"][0]<= limit["j2"][1] :
self._config["limit"]["j2"] = limit["j2"]
# save limit
#self.save_config()
return self.limit()
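# A minimal usage sketch for set_limit(), assuming `robot` is an already-connected
# instance (hypothetical variable name). Each entry is a [min, max] pair and is
# ignored unless min <= max.
"""
robot.set_limit({"j0": [-90, 90], "j1": [-45, 135]})
robot.set_limit('{"j2": [-120, 120]}')   # JSON string form is also accepted
print(robot.limit())                     # echoes the "limit" section of the config
"""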
def set_limit_backup(self, limit):
# json
try:
limit = json.loads(limit)
except:
pass
if "j0" in limit and len(limit["j0"]) == 2 and self.limit_base[0]<=limit["j0"][0]<= limit["j0"][1] <= self.limit_base[1]:
self._config["limit"]["j0"] = limit["j0"]
if "j1" in limit and len(limit["j1"]) == 2 and self.limit_base[0]<=limit["j1"][0]<= limit["j1"][1] <= self.limit_base[1]:
self._config["limit"]["j1"] = limit["j1"]
if "j2" in limit and len(limit["j2"]) == 2 and self.limit_base[0]<=limit["j2"][0]<= limit["j2"][1] <= self.limit_base[1]:
self._config["limit"]["j2"] = limit["j2"]
# save limit
#self.save_config()
return self.limit()
def limit(self):
#result = self.config(["limit"])
# log toolhead
#self._log_add({"limit": json.loads(result)}, "config")
return self.config(["limit"])
"""
status:
0: limit not passed
100: limit passed
joint: indices of joints that passed the limit
"""
def _limit_check(self, joint):
limit_passed = [i for i in range(len(self._config["limit"])) if not(self._config["limit"]["j"+ str(i)][0] <= joint[i] <= self._config["limit"]["j"+ str(i)][1])]
status = 0
if limit_passed:
status = 100
return {"status": status, "joint": limit_passed}
# =================================================================
# utility
# =================================================================
"""
forward kinematics: joint to xyz
"""
def f_k(self, joint):
try:
# joint to radian
teta = [math.radians(j) for j in joint]
# first we find x, y, z assuming base rotation is zero (teta_0 = 0)
# then we rotate the robot around the z axis by teta_0
tmp = self._bx + self._l1 * math.cos(teta[1]) + self._l2 * math.cos(teta[1] + teta[2]) + self._config["toolhead"]["x"] * math.cos(teta[1] + teta[2] + teta[3])
x = tmp * math.cos(teta[0])
y = tmp * math.sin(teta[0])
z = self._bz + self._l1 * math.sin(teta[1]) + self._l2 * math.sin(teta[1] + teta[2]) + self._config["toolhead"]["x"] * math.sin(teta[1] + teta[2] + teta[3])
alpha = teta[1] + teta[2] + teta[3]
beta = teta[4]
alpha = math.degrees(alpha)
beta = math.degrees(beta)
_rtn = [x, y, z]
if self._config["unit"]["length"] == "mm":
_rtn = [self._inch_to_mm(c) for c in _rtn]
return _rtn +[alpha, beta] + joint[5:]
except:
return None
"""
inverse kinematics: xyz to joint
"""
def i_k(self, xyz):
try:
x = xyz[0]
y = xyz[1]
z = xyz[2]
if self._config["unit"]["length"] == "mm":
x = self._mm_to_inch(x)
y = self._mm_to_inch(y)
z = self._mm_to_inch(z)
alpha = xyz[3]
beta = xyz[4]
alpha = math.radians(alpha)
beta = math.radians(beta)
# first we find the base rotation
teta_0 = math.atan2(y, x)
# next we assume base is not rotated and everything lives in x-z plane
x = math.sqrt(x ** 2 + y ** 2)
# next we update x and z based on base dimensions and hand orientation
x -= (self._bx + self._config["toolhead"]["x"] * math.cos(alpha))
z -= (self._bz + self._config["toolhead"]["x"] * math.sin(alpha))
# at this point x and z are the summation of two vectors one from lower arm and one from upper arm of lengths l1 and l2
# let L be the length of the overall vector
# we can calculate the angle between l1 , l2 and L
L = math.sqrt(x ** 2 + z ** 2)
L = np.round(L,13) # ???
# not valid
if L > (self._l1 + self._l2) or self._l1 > (self._l2 + L) or self._l2 > (self._l1 + L): # in this case there is no solution
return None
# init status
status = 0
if L > (self._l1 + self._l2) - self._delta_e or self._l1 > (self._l2 + L) - self._delta_e: # close to the workspace boundary: valid but not safe
status = 1
teta_l1_L = math.acos((self._l1 ** 2 + L ** 2 - self._l2 ** 2) / (2 * self._l1 * L)) # l1 angle to L
teta_L_x = math.atan2(z, x) # L angle to x axis
teta_1 = teta_l1_L + teta_L_x
# note that the other solution would be to set teta_1 = teta_L_x - teta_l1_L. But for the dynamics of the robot the first solution works better.
teta_l1_l2 = math.acos((self._l1 ** 2 + self._l2 ** 2 - L ** 2) / (2 * self._l1 * self._l2)) # l1 angle to l2
teta_2 = teta_l1_l2 - math.pi
teta_3 = alpha - teta_1 - teta_2
teta_4 = beta
teta_0 = math.degrees(teta_0)
teta_1 = math.degrees(teta_1)
teta_2 = math.degrees(teta_2)
teta_3 = math.degrees(teta_3)
teta_4 = math.degrees(teta_4)
return [teta_0, teta_1, teta_2, teta_3, teta_4] + xyz[5:]
except:
return None
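# Round-trip sketch for the kinematics helpers above, assuming `robot` is an
# instance of this class with its geometry (_bx, _bz, _l1, _l2, toolhead offset)
# already loaded from the config. The joint list is a made-up pose.
"""
joint = [0.0, 45.0, -90.0, 45.0, 0.0]
xyz = robot.f_k(joint)        # [x, y, z, alpha, beta], or None on error
back = robot.i_k(xyz)         # should reproduce the joint list (up to rounding) for a reachable pose
"""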
"""
input: joint
output: xyz
"""
def _joint_to_xyz(self, joint):
if any(joint == None):
return np.array([None for i in range(len(joint))])
# joint to radian
teta_0 = math.radians(joint[0])
teta_1 = math.radians(joint[1])
teta_2 = math.radians(joint[2])
teta_3 = math.radians(joint[3])
teta_4 = math.radians(joint[4])
# first we find x, y, z assuming base rotation is zero (teta_0 = 0)
# then we rotate the robot around the z axis by teta_0
tmp = self._bx + self._l1 * math.cos(teta_1) + self._l2 * math.cos(teta_1 + teta_2) + self._config["toolhead"]["x"] * math.cos(teta_1 + teta_2 + teta_3)
x = tmp * math.cos(teta_0)
y = tmp * math.sin(teta_0)
z = self._bz + self._l1 * math.sin(teta_1) + self._l2 * math.sin(teta_1 + teta_2) + self._config["toolhead"]["x"] * math.sin(teta_1 + teta_2 + teta_3)
alpha = teta_1 + teta_2 + teta_3
beta = teta_4
alpha = math.degrees(alpha)
beta = math.degrees(beta)
if len(joint) == 6:
return np.array([x, y, z, alpha, beta, joint[5]]) # [x, y, z, alpha, beta, joints[5]]
else:
return np.array([x, y, z, alpha, beta]) # [x, y, z, alpha, beta]
"""
status: 0: valid and safe xyz
status: 1: valid but not safe xyz
status: 2: not a valid xyz
"""
def _xyz_to_joint(self,xyz):
if any(xyz == None): # xyz contains None coordinate
return {"joint": np.array([None for i in range(len(xyz))]), "status": 2}
x = xyz[0]
y = xyz[1]
z = xyz[2]
alpha = xyz[3]
beta = xyz[4]
alpha = math.radians(alpha)
beta = math.radians(beta)
# first we find the base rotation
teta_0 = math.atan2(y, x)
# next we assume base is not rotated and everything lives in x-z plane
x = math.sqrt(x ** 2 + y ** 2)
# next we update x and z based on base dimensions and hand orientation
x -= (self._bx + self._config["toolhead"]["x"] * math.cos(alpha))
z -= (self._bz + self._config["toolhead"]["x"] * math.sin(alpha))
# at this point x and z are the summation of two vectors one from lower arm and one from upper arm of lengths l1 and l2
# let L be the length of the overall vector
# we can calculate the angle between l1 , l2 and L
L = math.sqrt(x ** 2 + z ** 2)
L = np.round(L,13) # ???
# not valid
if L > (self._l1 + self._l2) or self._l1 > (self._l2 + L) or self._l2 > (self._l1 + L): # in this case there is no solution
return {"joint": np.array([None for i in range(len(xyz))]), "status": 2}
# init status
status = 0
if L > (self._l1 + self._l2) - self._delta_e or self._l1 > (self._l2 + L) - self._delta_e: # close to the workspace boundary: valid but not safe
status = 1
teta_l1_L = math.acos((self._l1 ** 2 + L ** 2 - self._l2 ** 2) / (2 * self._l1 * L)) # l1 angle to L
teta_L_x = math.atan2(z, x) # L angle to x axis
teta_1 = teta_l1_L + teta_L_x
# note that the other solution would be to set teta_1 = teta_L_x - teta_l1_L. But for the dynamics of the robot the first solution works better.
teta_l1_l2 = math.acos((self._l1 ** 2 + self._l2 ** 2 - L ** 2) / (2 * self._l1 * self._l2)) # l1 angle to l2
teta_2 = teta_l1_l2 - math.pi
teta_3 = alpha - teta_1 - teta_2
teta_4 = beta
teta_0 = math.degrees(teta_0)
teta_1 = math.degrees(teta_1)
teta_2 = math.degrees(teta_2)
teta_3 = math.degrees(teta_3)
teta_4 = math.degrees(teta_4)
if len(xyz) == 6:
joint = np.array([teta_0, teta_1, teta_2, teta_3, teta_4, xyz[5]])
else:
joint = np.array([teta_0, teta_1, teta_2, teta_3, teta_4])
return {"joint": joint, "status": status}
# return: np.array
def _travel_to_joint(self, travel):
try:
if travel[2] >= 800: # travel is joint
travel[2] -= 1000
joint = np.copy(travel)
joint[3] = 0.5 * (travel[4] - travel[3] )
joint[4] = 0.5 * (-travel[4] - travel[3] )
return joint
else: # travel is xyz
return self._xyz_to_joint(travel)["joint"]
except:
return np.array([None for i in range(len(travel))])
def _travel_to_xyz(self, travel):
try:
if travel[2] >= 800: # travel is joint
joint = self._travel_to_joint(travel)
return self._joint_to_xyz(joint)
return travel
except:
return np.array([None for i in range(len(travel))])
def _joint_to_travel(self, joint):
joint[2] += 1000
travel = np.copy(joint)
travel[3] = -joint[3]-joint[4]
travel[4] = joint[3]-joint[4]
#return joint
return travel
def _xyz_to_travel(self, xyz):
return xyz
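# Sanity-check sketch for the travel <-> joint mapping above, illustrative only
# (these are private helpers): the controller's A/B axes drive the wrist
# differentially (a = -j3 - j4, b = j3 - j4) and the Z travel is offset by +1000
# to mark joint-space moves (hence the >= 800 test). `robot` is a hypothetical
# instance of this class.
"""
import numpy as np

joint = np.array([10.0, 20.0, 30.0, 40.0, 50.0, 0.0])
travel = robot._joint_to_travel(np.copy(joint))
assert np.allclose(robot._travel_to_joint(np.copy(travel)), joint)
"""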
def _joint_validate(self, joint_init, joint_final):
joint_achieve = np.copy(joint_final)
status = 0
message = ""
for i in range(len(self._config["limit"])):
if joint_init[i] > self._config["limit"]["j"+ str(i)][1]:
# above
joint_achieve[i] = max(joint_final[i], self._config["limit"]["j"+ str(i)][0])
message = "initial position (joint: "+str(joint_init)+") is out of limit"
status = 100
elif self._config["limit"]["j"+ str(i)][0] <= joint_init[i] <= self._config["limit"]["j"+ str(i)][1]:
# between
joint_achieve[i] = min(max(joint_final[i], self._config["limit"]["j"+ str(i)][0]),self._config["limit"]["j"+ str(i)][1] )
else:
# under
joint_achieve[i] = min(joint_final[i],self._config["limit"]["j"+ str(i)][1])
message = "initial position (joint: "+str(joint_init)+") is out of limit"
status = 0
if not np.array_equal(joint_achieve, joint_final):
message = "final position (joint: "+str(joint_final)+") is out of limit"
status = 100
return {"status": status, "joint_achieve": joint_achieve, "joint_final": joint_final, "message": message}
def _joint_validate_backup(self, joint_init, joint_final):
message = ""
joint_achieve = np.copy(joint_final)
# check for init
check_init = self._limit_check(joint_init)
if check_init["status"] != 0: # no need to check final
message = "initial position (joint: "+str(joint_init)+") is out of limit"
return {"status": check_init["status"], "joint_achieve": joint_achieve, "joint_final": joint_final, "message": message}
# find the achieve
for i in range(len(self._config["limit"])):
joint_achieve[i] = min(max(self._config["limit"]["j"+ str(i)][0], joint_achieve[i]),self._config["limit"]["j"+ str(i)][1] )
status = 0
if not np.array_equal(joint_achieve, joint_final):
status = 100
message = "final position (joint: "+str(joint_final)+") is out of limit"
return {"status": status, "joint_achieve": joint_achieve, "joint_final": joint_final, "message": message}
"""
Given two xyz points, find the middle and see if the middle is achievable.
"""
def _xyz_achieve(self, xyz_init, xyz_final):
xyz_middle = (xyz_init + xyz_final)/2
result = self._xyz_to_joint(xyz_middle)
joint = result["joint"]
status = result["status"]
if status or self._limit_check(joint)["status"]:
return [np.copy(xyz_init), np.copy(xyz_middle)]
return [np.copy(xyz_middle), np.copy(xyz_final)]
def _line_validate(self, xyz_init, xyz_final):
message = ""
# check xyz_init
joint = self._xyz_to_joint(xyz_init)["joint"]
if any(joint == None):
message = "initial position is not valid"
#return {"status": 100, "xyz_achieve": None, "xyz_final": xyz_final, "message": message}
return {"status": 100, "xyz_achieve": np.array([None for _ in range(len(xyz_init))]), "xyz_final": xyz_final, "message": message}
if self._limit_check(joint)["status"]:
message = 'initial position is out of limit, in this case use the "move" command and set the "path":"joint" to get out of the limit zone'
return {"status": 100, "xyz_achieve": np.array([None for _ in range(len(xyz_init))]), "xyz_final": xyz_final, "message": message}
# same point
if np.array_equal(xyz_final, xyz_init):
return {"status": 0, "xyz_achieve": xyz_final, "xyz_final": xyz_final}
# rotation around B (j4)
if np.array_equal(xyz_final[0:4], xyz_init[0:4]):
return {"status": 0, "xyz_achieve": xyz_final, "xyz_final": xyz_final}
#direction
direction = xyz_final - xyz_init
L = math.floor(np.linalg.norm(direction)/self._segment_size)
direction = self._segment_size*direction/np.linalg.norm(direction)
xyz_achieve = np.copy(xyz_init)
for i in range(1,L+1):
xyz = xyz_init + i * direction
# check xyz
result = self._xyz_to_joint(xyz)
joint = result["joint"]
status = result["status"]
if status or self._limit_check(joint)["status"]:
xyz_r = xyz
for i in range(10):
[xyz_achieve, xyz_r] = self._xyz_achieve(xyz_achieve, xyz_r)
message = "achievable position is (xyz: " + str(xyz_achieve) +")"
return {"status": 100, "xyz_achieve": xyz_achieve, "xyz_final": xyz_final, "message": message}
else:
xyz_achieve = np.copy(xyz)
# xyz_final validate
xyz = np.copy(xyz_final)
result = self._xyz_to_joint(xyz)
joint = result["joint"]
status = result["status"]
if status or self._limit_check(joint)["status"]:
xyz_r = xyz
for i in range(10):
[xyz_achieve, xyz_r] = self._xyz_achieve(xyz_achieve, xyz_r)
message = "achievable position is (xyz: " + str(xyz_achieve) +")"
return {"status": 100, "xyz_achieve": xyz_achieve, "xyz_final": xyz_final, "message": message}
return {"status": 0, "xyz_achieve": xyz_final, "xyz_final": xyz_final}
def _move_to_gc(self,travel_final, prm):
gc = 'X{:07.4f} Y{:07.4f} Z{:07.4f} A{:07.4f} B{:07.4f} C{:07.4f}'.format(travel_final[0], travel_final[1], travel_final[2], travel_final[3], travel_final[4], travel_final[5])
if "gc" in prm:
gc = prm["gc"] + gc
else:
gc = "G1 " + gc
try:
gc = gc + 'F{:07.4f}'.format(prm["speed"]*self._scale["speed"])
except:
pass
gc = 'G90 ' + gc
return gc
def _move_to_gc_backup(self,travel_final, prm):
gc = 'X{:07.4f} Y{:07.4f} Z{:07.4f} A{:07.4f} B{:07.4f} C{:07.4f}'.format(travel_final[0], travel_final[1], travel_final[2], travel_final[3], travel_final[4], travel_final[5])
if "gc" in prm:
gc = prm["gc"] + gc
else:
gc = "G1 " + gc
try:
gc = gc + 'F{:07.4f}'.format(prm["speed"])
except:
pass
gc = 'G90 ' + gc
return gc
def _move_to_gc_backup_(self,travel_final, prm):
gc = 'X{:07.4f} Y{:07.4f} Z{:07.4f} A{:07.4f} B{:07.4f} C{:07.4f}'.format(travel_final[0], travel_final[1], travel_final[2], travel_final[3], travel_final[4], travel_final[5])
gc = 'G1 ' + gc
try:
gc = gc + 'F{:07.4f}'.format(prm["speed"])
except:
pass
gc = 'G90 ' + gc
return gc
def _ref_change(self, travel):
return 'G28.3X{:07.4f} Y{:07.4f} Z{:07.4f} A{:07.4f} B{:07.4f} C{:07.4f}'.format(travel[0], travel[1], travel[2], travel[3], travel[4], travel[5])
def _joint_final(self, prm, joint_init):
if 'j0' in prm:
joint_init[0] = prm['j0'] + prm["movement"]*joint_init[0]
if 'j1' in prm:
joint_init[1] = prm['j1'] + prm["movement"]*joint_init[1]
if 'j2' in prm:
joint_init[2] = prm['j2'] + prm["movement"]*joint_init[2]
if 'j3' in prm:
joint_init[3] = prm['j3'] + prm["movement"]*joint_init[3]
if 'j4' in prm:
joint_init[4] = prm['j4'] + prm["movement"]*joint_init[4]
if 'j5' in prm:
joint_init[5] = prm['j5'] + prm["movement"]*joint_init[5]
return joint_init
def _xyz_final (self, prm, xyz_init):
if 'x' in prm:
xyz_init[0] = prm['x'] + prm["movement"]*xyz_init[0]
if 'y' in prm:
xyz_init[1] = prm['y'] + prm["movement"]*xyz_init[1]
if 'z' in prm:
xyz_init[2] = prm['z'] + prm["movement"]*xyz_init[2]
if 'a' in prm:
xyz_init[3] = prm['a'] + prm["movement"]*xyz_init[3]
if 'b' in prm:
xyz_init[4] = prm['b'] + prm["movement"]*xyz_init[4]
if 'c' in prm:
xyz_init[5] = prm['c'] + prm["movement"]*xyz_init[5]
return xyz_init
# {"command": "M100", "prm":{"out5":1, "out1":0}}
def _form_io(self,prm):
"""
prm = json.dumps(prm)
prm = prm.replace('"', "")
return {'gc_list': ["M100("+prm+")"], 'status':0}
"""
# this is when outputs are not showing
"""
outputs with
1->6
2->7
3->8
4->9
5->10
inputs
1->1
2->7
3->8
4->9
"""
#prm_tmp = dict(prm)
prm_tmp = copy.deepcopy(prm)
for x in prm:
if x in ["in2", "in3", "in4"]:
# add new key
prm_tmp["in"+str(int(x[-1]) + 5)] = prm_tmp[x]
# remove x
prm_tmp.pop(x)
elif x in ["di2mo", "di3mo", "di4mo"]:
# add new key
prm_tmp["di"+str(int(x[2:-2]) + 5)+"mo"] = prm_tmp[x]
# remove x
prm_tmp.pop(x)
elif x in ["out1", "out2", "out3", "out4", "out5"]:
prm_tmp["do"+str(int(x[-1])+ 5)+"mo"] = prm[x]
elif x in ["do6mo", "do7mo", "do8mo", "do9mo", "do10mo"]:
prm_tmp["out"+str(int(x[2:-2])- 5)] = prm[x]
elif x in ["do1mo", "do2mo", "do3mo", "do4mo", "do5mo"]:
prm_tmp["do"+str(int(x[2:-2])+ 5)+"mo"] = 0
prm_tmp["out"+x[2:-2]] = 0
prm = prm_tmp
prm = json.dumps(prm)
return prm.replace('"', "")
def _M100 (self, prm):
prm = self._form_io(prm)
return {'gc_list': ["M100("+prm+")"], 'status':0}
def _M100_backup (self, prm):
"""
prm = json.dumps(prm)
prm = prm.replace('"', "")
return {'gc_list': ["M100("+prm+")"], 'status':0}
"""
# this is when outputs are not showing
"""
outputs with
1->6
2->7
3->8
4->9
5->10
inputs
1->1
2->7
3->8
4->9
"""
#prm_tmp = dict(prm)
prm_tmp = copy.deepcopy(prm)
for x in prm:
if x in ["in2", "in3", "in4"]:
# add new key
prm_tmp["in"+str(int(x[-1]) + 5)] = prm_tmp[x]
# remove x
prm_tmp.pop(x)
elif x in ["di2mo", "di3mo", "di4mo"]:
# add new key
prm_tmp["di"+str(int(x[2:-2]) + 5)+"mo"] = prm_tmp[x]
# remove x
prm_tmp.pop(x)
elif x in ["out1", "out2", "out3", "out4", "out5"]:
prm_tmp["do"+str(int(x[-1])+ 5)+"mo"] = prm[x]
elif x in ["do6mo", "do7mo", "do8mo", "do9mo", "do10mo"]:
prm_tmp["out"+str(int(x[2:-2])- 5)] = prm[x]
elif x in ["do1mo", "do2mo", "do3mo", "do4mo", "do5mo"]:
prm_tmp["do"+str(int(x[2:-2])+ 5)+"mo"] = 0
prm_tmp["out"+x[2:-2]] = 0
prm = prm_tmp
prm = json.dumps(prm)
prm = prm.replace('"', "")
return {'gc_list': ["M100("+prm+")"], 'status':0}
# {"command": "M100", "prm":{"in7":1}}
def _M101 (self, prm):
prm = json.dumps(prm)
prm = prm.replace('"', "")
return {'gc_list':["M101("+prm+")"], 'status':0}
# =================================================================
# method
# =================================================================
#{"command": "move", "prm": {"movement": "relative", "speed": 1000.0, "path": "line", "segment_size": 0.01, "j0": 0.0, "j1": 0.0, "j2": 0.0, "j3": 0.0, "j4": 0.0}}
"""
command: move
movement: absolute:0, relative:1
speed
path: line, circle, joint
input:j or x
"""
def _move(self,prm):
if "joint" in prm:
for i in range(len(prm["joint"])):
if prm["joint"][i] is not None:
prm["j"+str(i)] = prm["joint"][i]
prm.pop("joint", None)
if "xyz" in prm:
_map = ["x", "y", "z", "a", "b", "c"]
for i in range(len(prm["xyz"])):
if prm["xyz"][i] is not None:
prm[_map[i]] = prm["xyz"][i]
prm.pop("xyz", None)
# speed
if "speed" in prm and prm["speed"] <= 0:
return {'gc_list':[], 'status':100 , "message": "not a valid format"}
"""
unit
position (xyz)
speed
jerk (not joint)
"""
if self._config["unit"]["length"] == "mm":
_key = [k for k in ["x", "y", "z"] if k in prm]
for k in _key:
prm[k] = self._mm_to_inch(prm[k])
if "speed" in prm and prm["path"] != "joint":
prm["speed"] = self._mm_to_inch(prm["speed"])
if "jerk" in prm and prm["path"] != "joint":
_jerk = [self._mm_to_inch(x) for x in prm["jerk"]]
prm["jerk"] = self._jerk_mm_to_inch(prm["jerk"])
if "path" in prm and prm["path"] == "joint": # move_joint
return self._move_joint(prm)
elif "path" in prm and prm["path"] == "line": # move_line
return self._move_line(prm)
return {'gc_list':[], 'status':100 , "message": "not a valid format"}
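# Example "move" commands handled by _move above, submitted through play() and
# assuming `robot` is an already-connected instance (hypothetical variable name).
# movement: 0 = absolute, 1 = relative; path selects the joint-space or
# straight-line planner; x/y/z are interpreted in the configured length unit
# (mm values are converted to inches internally).
"""
# absolute joint move
robot.play({"command": "move",
            "prm": {"path": "joint", "movement": 0, "j0": 0, "j1": 90, "speed": 1000}})
# relative straight-line move of +10 in x
robot.play({"command": "move",
            "prm": {"path": "line", "movement": 1, "x": 10, "speed": 500}})
"""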
def _move_joint(self, prm):
travel_init = np.copy(self._system["travel_final"])
joint_init = self._travel_to_joint(np.copy(self._system["travel_final"]))
# joint_final
if any(['j0' in prm, 'j1' in prm, 'j2' in prm, 'j3' in prm, 'j4' in prm, 'j5' in prm]):
joint_final = self._joint_final(prm, np.copy(joint_init))
elif any(['x' in prm, 'y' in prm, 'z' in prm, 'a' in prm, 'b' in prm, 'c' in prm]):
xyz_init = self._joint_to_xyz(np.copy(joint_init))
xyz_final = self._xyz_final (prm, xyz_init)
result = self._xyz_to_joint(xyz_final)
joint_final = result["joint"]
if result["status"]:
return {'gc_list': [], 'travel_final':np.copy(self._system["travel_final"]), 'status':result["status"], "message": "final position is not valid"}
# validate joint_final
result = self._joint_validate(joint_init, joint_final)
joint_achieve = result["joint_achieve"]
# travel final
prm_switch = False
travel_final = self._joint_to_travel(np.copy(joint_achieve))
gc_list = []
if travel_init[2] < 800:
gc_list.append(self._ref_change(self._joint_to_travel(joint_init)))
prm_switch = True
# jerk
if "jerk" in prm:
jrk = self._jerk(prm["jerk"])
gc_list += self._M100(jrk)["gc_list"]
else:
if prm_switch:
#jrk = self._jerk(list(self._config["default_jerk"]["joint"].values()))
jrk = self._jerk(list(self._config["default_jerk"]["joint"]))
gc_list += self._M100(jrk)["gc_list"]
# speed
if not "speed" in prm and prm_switch:
prm["speed"] = self._config["default_speed"]["joint"]
gc_list.append(self._move_to_gc(travel_final, prm))
return {'gc_list': gc_list, 'travel_final':travel_final, 'status':result["status"], "message": result["message"]}
def _move_line(self,prm):
travel_init = np.copy(self._system["travel_final"])
xyz_init = self._travel_to_xyz(np.copy(self._system["travel_final"]))
# xyz_final
if any(['j0' in prm, 'j1' in prm, 'j2' in prm, 'j3' in prm, 'j4' in prm, 'j5' in prm]):
joint_init = self._xyz_to_joint(np.copy(xyz_init))["joint"]
joint_final = self._joint_final (prm, joint_init)
xyz_final = self._joint_to_xyz(joint_final)
elif any(['x' in prm, 'y' in prm, 'z' in prm, 'a' in prm, 'b' in prm, 'c' in prm]):
xyz_final = self._xyz_final (prm, np.copy(xyz_init))
# validate xyz_final
# send error if result is not valid ???
result = self._line_validate(xyz_init, xyz_final)
xyz_achieve = result["xyz_achieve"]
if any(xyz_achieve == None):
return {'gc_list':[], 'status':result["status"], "message": result["message"]}
# travel final
prm_switch = False
travel_final = self._xyz_to_travel(xyz_achieve)
gc_list = []
if travel_init[2] >= 800:
prm_switch = True
gc_list.append(self._ref_change(self._xyz_to_travel(xyz_init)))
###
#jrk = {k: v[1] for k,v in rbt["jerk"]["xyz"]["job"].items()}
#gc_list += self.jerk(jrk)["gc_list"]
# jerk
if "jerk" in prm:
jrk = self._jerk(prm["jerk"])
gc_list += self._M100(jrk)["gc_list"]
else:
if prm_switch:
#jrk = self._jerk(list(self._config["default_jerk"]["xyz"].values()))
jrk = self._jerk(list(self._config["default_jerk"]["xyz"]))
gc_list += self._M100(jrk)["gc_list"]
# speed
if not "speed" in prm and prm_switch:
prm["speed"] = self._config["default_speed"]["xyz"]
gc_list.append(self._move_to_gc(travel_final, prm))
if "message" in result:
return {'gc_list':gc_list, 'travel_final':travel_final, 'status':result["status"], "message": result["message"]}
return {'gc_list':gc_list, 'travel_final':travel_final, 'status':result["status"]}
"""
Supported params:
offsets (I,J,K): center offset from current point
end point (X,Y,Z): End point (optional)
P: number of turns (should be an integer)
circle axis (M): should be one of 0 (Z, default), 1 (Y), 2 (X)
movement: 0 absolute, 1 relative (default)
rotation_dirxn: 0 cw (default), 1 (ccw)
"""
def move_circle(self, prm, fulfill=True, append=True):
try:
prm = json.loads(prm)
except:
pass
# Set into cartesian coordinates
self.play({"command": "move", "prm": {"path": "line", "movement": 1, "x":0}}, True)
gcodes = []
circle_axis = 0
if 'movement' in prm and prm['movement'] == 0:
gcodes.append('G90')
else:
gcodes.append('G91')
if 'M' in prm:
gcodes.append('G'+str(17+prm['M']))
circle_axis=prm['M']
else:
gcodes.append('G17')
if (circle_axis == 0 and 'K' in prm or
circle_axis == 1 and 'J' in prm or
circle_axis == 2 and 'I' in prm):
print("Cannot provide offset along circle axis")
circle_command = ''
if 'rotation_dirxn' in prm and prm['rotation_dirxn'] == 1:
circle_command = circle_command + 'G3'
else:
circle_command = circle_command + 'G2'
for key in ['I', 'J', 'K', 'X', 'Y', 'Z', 'P']:
if key in prm:
circle_command = circle_command + ' ' + key + str(prm[key])
gcodes.append(circle_command)
for gcode in gcodes:
self.play({"command": "g2core", "prm": {'gc': gcode}}, append=True)
# {"command": "halt", "prm":0}
def _halt(self):
gc_list = ["!", "%"]
_printx(self._prnt,"halt: ")
return {'gc_list':gc_list, 'status':0}
# *{"command": "probe", "prm": {"j0": , "j1": , ..., "speed"}}
def _probe(self, prm):
tmp_prm = {"movement": 0,
"speed": 100,
"jerk": [300, 300, 300, 300, 300, 300],
"gc": "G38.3 ",
"path": "joint"}
tmp_prm.update(prm)
"""
prm["movement"] = 0
prm["speed"] = 100
prm["jerk"] = [300, 300, 300, 300, 300, 300]
prm["gc"] = "G38.3 "
prm["path"] = "joint"
"""
return self._move(tmp_prm)
# *{"command": "homing", "prm": ["j0", "j1", ...]}
def _home(self, args):
_printx(self._prnt,"args: ", args)
gc_list = []
if self._system["travel_final"][2] < 800:
joint = self._travel_to_joint(np.copy(self._system["travel_final"]))
gc_list.append(self._ref_change(self._joint_to_travel(joint)))
#home = {}
command = ""
if "j0" in args:
command += "x0"
if "j1" in args:
command += "y0"
if "j2" in args:
command += "z0"
if "j3" in args or "j4" in args:
command += "a0b0"
if command:
gc_list.append("G28.2" + command)
return {'gc_list': gc_list, 'status':0}
return False
# {"command": "set_joint", "prm": {"j0": 1.1, ...}}
def _set_joint(self, prm):
joint_current = self._travel_to_joint(np.copy(self._system["travel_final"]))
if 'j0' in prm:
joint_current[0] = prm['j0']
if 'j1' in prm:
joint_current[1] = prm['j1']
if 'j2' in prm:
joint_current[2] = prm['j2']
if 'j3' in prm:
joint_current[3] = prm['j3']
if 'j4' in prm:
joint_current[4] = prm['j4']
if 'j5' in prm:
joint_current[5] = prm['j5']
travel = self._joint_to_travel(joint_current)
command_send = "G28.3 "
if 'j0' in prm:
command_send = command_send + "X{:07.4f}".format(travel[0])
if 'j1' in prm:
command_send = command_send + "Y{:07.4f}".format(travel[1])
if 'j2' in prm:
command_send = command_send + "Z{:07.4f}".format(travel[2])
if 'j3' in prm or "j4" in prm:
command_send = command_send + "A{:07.4f}".format(travel[3])
command_send = command_send + "B{:07.4f}".format(travel[4])
"""
if 'j3' in prm:
command_send = command_send + "A{:07.4f}".format(travel[3])
if 'j4' in prm:
command_send = command_send + "B{:07.4f}".format(travel[4])
"""
if 'j5' in prm:
command_send = command_send + "C{:07.4f}".format(travel[5])
return {'gc_list':[command_send], 'status':0, "travel_final": travel}
# {"command": "gcode", "prm":{"gc": , ...}}
def _gcode (self, prm):
if type(prm) != dict or "gc" not in prm:
return {'gc_list':[], 'status':100 , "message": "not a valid format"}
tmp_prm = {"path": "line", "movement": 0}
gc_prm = {}
line = prm["gc"].strip()
line = line.lower()
keys = list(set(re.findall("[xyzabcuvgf]+", line)))
for k in keys:
regexp = k + r"\s*(-?\d+(?:\.\d+)?)"
values = re.findall(regexp, line)
tmp = []
for v in values:
try:
tmp.append(float(v))
except:
pass
gc_prm[k] = tmp
if k == "g":
for v in gc_prm[k]:
if v < 1.1:
tmp_prm["method"] = "move"
elif v == 90:
tmp_prm["movement"] = 0
elif v == 91:
tmp_prm["movement"] = 1
elif k == "f" and gc_prm[k]:
tmp_prm["speed"] = gc_prm[k][0]
elif gc_prm[k]:
tmp_prm[k] = gc_prm[k][0]
# make sure method exists
if "method" in tmp_prm:
# remove method key
method = tmp_prm["method"]
tmp_prm.pop('method', None)
# remove gc parameter
prm.pop("gc", None)
# merge two dictionaries
tmp_prm.update(prm)
return getattr(self, "_"+method)(tmp_prm)
# jerk : [1200, 500, 100, ...] ???
def _jerk(self, jrk):
_map = ["xjm", "yjm", "zjm", "ajm", "bjm", "cjm"]
send = {}
for i in range(min(len(_map), len(jrk))):
#send[_map[i]] = jrk[i]
send[_map[i]] = jrk[i] * self._scale["jerk"]
return send
# {"command": "servo", "prm":500}
def _servo(self, prm):
self._io["servo"] = prm
cmd = []
cmd.append("M3S"+str(prm))
return {'gc_list':cmd, 'status':0}
# {"command": "laser", "prm":1}
def _laser(self, prm):
result = self._M100({"out5": prm})
#result["gc_list"] = result["gc_list"]
return result
# {"command": "g2core", "prm":}
def _g2core (self, prm):
return {'gc_list':[prm], 'status':0}
# {"command": "sleep", "prm":500}
def _sleep(self, prm):
cmd = []
cmd.append("G4P"+str(prm))
return {'gc_list':cmd, 'status':0}
# {"command": "output", "prm":{"out1": 0, "out2":1, "out3":1}}
def _output(self, prm):
#output_key = list(set(prm.keys()) & set(["out1", "out2","out3","out4"]))
#output = {x: int(prm[x])%2 for x in output_key }
result = self._M100(prm)
#result["gc_list"] = result["gc_list"]
return result
# "laser", "servo", "di1mo", "out1", "do1mo"
def _set_io(self, prm):
valid_m100 = ["di1mo", "di2mo", "di3mo", "di4mo", "do1mo" ,"do2mo", "do3mo", "do4mo", "out1", "out2", "out3", "out4", "out5"]
cmd = []
# laser
if "laser" in prm:
prm["out5"] = prm["laser"]
# M100
# di or do (1,2,3,4)mo, out(1,2,3,4 ,5)
tmp = {x: prm[x] for x in prm if x in valid_m100}
result = self._M100(tmp)
# servo
if "servo" in prm:
result["gc_list"] += self._servo(prm["servo"])["gc_list"]
return result
# "laser", "servo", "di1mo", "out1", "do1mo"
def _set_io_async(self, prm):
valid_m100 = ["di1mo", "di2mo", "di3mo", "di4mo", "do1mo" ,"do2mo", "do3mo", "do4mo", "out1", "out2", "out3", "out4", "out5"]
cmd = []
# laser
if "laser" in prm:
prm["out5"] = prm["laser"]
# M100
# di or do (1,2,3,4)mo, out(1,2,3,4 ,5)
tmp = {x: prm[x] for x in prm if x in valid_m100}
#result = self._M100(tmp)
tmp = self._form_io(tmp)
result = {'gc_list': [tmp], 'status':0}
# servo
if "servo" in prm:
result["gc_list"] += self._servo(prm["servo"])["gc_list"]
return result
# {"command": "input", "prm":{"in1": 0, "in2":1, "in3":1}}
def _wait_for_input(self, prm):
result = False
if "in1" in prm:
mode = [False, True][prm["in1"]]
result = self._M101({"in1": mode})
elif "in2" in prm:
mode = [False, True][prm["in2"]]
result = self._M101({"in7": mode})
elif "in3" in prm:
mode = [False, True][prm["in3"]]
result = self._M101({"in8": mode})
elif "in4" in prm:
mode = [False, True][prm["in4"]]
result = self._M101({"in9": mode})
if result:
result["gc_list"] = result["gc_list"]
return result
# {"command": "set_motion", "prm":{"jt": 1, "ct": 0.01, "gpa": 2}}
def _set_motion(self, prm):
gc_list = []
if "jt" in prm:
gc_list.append("{jt : "+str(prm["jt"]) +"}")
if "ct" in prm:
gc_list.append("{ct : "+str(prm["ct"]) +"}")
if "pcm" in prm:
gc_list.append("{gpa : "+str(prm["gpa"]) +"}")
return {'gc_list':gc_list, 'status':0, "travel_final": np.copy(self._system["travel_final"])}
def motion(self):
return self.config(["motion"])
# {"command": "set_toolhead", "prm":{"x": 1.74}}
def _set_toolhead(self, prm):
# sanitation
# initialization
travel = np.copy(self._system["travel_final"])
joint = self._travel_to_joint(np.copy(self._system["travel_final"]))
travel_final = self._joint_to_travel(np.copy(joint))
gc_list = []
status = 0
# change ik to joint
if travel[2] < 800:
gc_list.append(self._ref_change(travel_final))
# update tool length
gc_list.append("G10L1P1X"+ str(prm["x"]))
# update config
#self._config["toolhead"]["x"] = prm["x"]
return {'gc_list':gc_list, 'status':0, "travel_final": travel_final}
def toolhead(self):
#result = self.config(["toolhead"])
# log toolhead
#self._log_add({"toolhead": json.loads(result)}, "config")
return self.config(["toolhead"])
|
crawl.py
|
import json
import os
import sys
from http.server import HTTPServer, SimpleHTTPRequestHandler
from multiprocessing import Process
from django.core.management.base import BaseCommand
from django.utils.translation import gettext_lazy as _
from default.extract_sphinx import process
class Command(BaseCommand):
help = "crawls a directory, and prepares the data to POST to the API"
def add_arguments(self, parser):
parser.add_argument("directory", help=_("the directory to crawl"))
parser.add_argument("base-url", help=_("the URL at which the directory will be deployed"))
parser.add_argument("secret", help=_("the secret value to authenticate with the API"))
def handle(self, *args, **options):
host = "localhost"
port_number = 8331
def http_server():
os.chdir(options["directory"])
HTTPServer((host, port_number), SimpleHTTPRequestHandler).serve_forever()
p = Process(target=http_server)
data = {}
try:
p.start()
for entry in os.scandir(options["directory"]):
if not entry.is_dir():
continue
data[entry.name] = process(f"http://{host}:{port_number}/{entry.name}/", options["base_url"])
finally:
p.terminate()
json.dump({"secret": options["secret"], "base_url": options["base_url"], "data": data}, sys.stdout)
|
windows.py
|
from ...third_party import WebsocketServer # type: ignore
from .configurations import ConfigManager
from .configurations import WindowConfigManager
from .diagnostics import ensure_diagnostics_panel
from .logging import debug
from .logging import exception_log
from .message_request_handler import MessageRequestHandler
from .panels import log_server_message
from .promise import Promise
from .protocol import Diagnostic
from .protocol import DiagnosticSeverity
from .protocol import DocumentUri
from .protocol import Error
from .protocol import Location
from .sessions import get_plugin
from .sessions import Logger
from .sessions import Manager
from .sessions import Session
from .sessions import SessionBufferProtocol
from .sessions import SessionViewProtocol
from .settings import userprefs
from .transports import create_transport
from .types import ClientConfig
from .types import matches_pattern
from .typing import Optional, Any, Dict, Deque, List, Generator, Tuple, Iterable, Sequence, Union
from .url import parse_uri
from .views import extract_variables
from .views import make_link
from .workspace import ProjectFolders
from .workspace import sorted_workspace_folders
from abc import ABCMeta
from abc import abstractmethod
from collections import OrderedDict
from collections import deque
from subprocess import CalledProcessError
from time import time
from weakref import ref
from weakref import WeakSet
import functools
import json
import sublime
import threading
import urllib.parse
_NO_DIAGNOSTICS_PLACEHOLDER = " No diagnostics. Well done!"
class AbstractViewListener(metaclass=ABCMeta):
TOTAL_ERRORS_AND_WARNINGS_STATUS_KEY = "lsp_total_errors_and_warnings"
view = None # type: sublime.View
@abstractmethod
def session_async(self, capability_path: str, point: Optional[int] = None) -> Optional[Session]:
raise NotImplementedError()
@abstractmethod
def sessions_async(self, capability_path: Optional[str] = None) -> Generator[Session, None, None]:
raise NotImplementedError()
@abstractmethod
def session_views_async(self) -> Iterable[SessionViewProtocol]:
raise NotImplementedError()
@abstractmethod
def on_session_initialized_async(self, session: Session) -> None:
raise NotImplementedError()
@abstractmethod
def on_session_shutdown_async(self, session: Session) -> None:
raise NotImplementedError()
@abstractmethod
def diagnostics_async(self) -> Iterable[Tuple[SessionBufferProtocol, Sequence[Tuple[Diagnostic, sublime.Region]]]]:
raise NotImplementedError()
@abstractmethod
def diagnostics_intersecting_region_async(
self,
region: sublime.Region
) -> Tuple[Sequence[Tuple[SessionBufferProtocol, Sequence[Diagnostic]]], sublime.Region]:
raise NotImplementedError()
@abstractmethod
def diagnostics_touching_point_async(
self,
pt: int,
max_diagnostic_severity_level: int = DiagnosticSeverity.Hint
) -> Tuple[Sequence[Tuple[SessionBufferProtocol, Sequence[Diagnostic]]], sublime.Region]:
raise NotImplementedError()
def diagnostics_intersecting_async(
self,
region_or_point: Union[sublime.Region, int]
) -> Tuple[Sequence[Tuple[SessionBufferProtocol, Sequence[Diagnostic]]], sublime.Region]:
if isinstance(region_or_point, int):
return self.diagnostics_touching_point_async(region_or_point)
elif region_or_point.empty():
return self.diagnostics_touching_point_async(region_or_point.a)
else:
return self.diagnostics_intersecting_region_async(region_or_point)
@abstractmethod
def on_diagnostics_updated_async(self) -> None:
raise NotImplementedError()
@abstractmethod
def on_code_lens_capability_registered_async(self) -> None:
raise NotImplementedError()
@abstractmethod
def get_language_id(self) -> str:
raise NotImplementedError()
@abstractmethod
def get_uri(self) -> str:
raise NotImplementedError()
@abstractmethod
def do_signature_help_async(self, manual: bool) -> None:
raise NotImplementedError()
@abstractmethod
def navigate_signature_help(self, forward: bool) -> None:
raise NotImplementedError()
@abstractmethod
def on_post_move_window_async(self) -> None:
raise NotImplementedError()
def extract_message(params: Any) -> str:
return params.get("message", "???") if isinstance(params, dict) else "???"
def set_diagnostics_count(view: sublime.View, errors: int, warnings: int) -> None:
try:
key = AbstractViewListener.TOTAL_ERRORS_AND_WARNINGS_STATUS_KEY
if userprefs().show_diagnostics_count_in_view_status:
view.set_status(key, "E: {}, W: {}".format(errors, warnings))
else:
view.erase_status(key)
except Exception:
pass
class WindowManager(Manager):
DIAGNOSTIC_PHANTOM_KEY = "lsp_diagnostic_phantom"
def __init__(
self,
window: sublime.Window,
workspace: ProjectFolders,
configs: WindowConfigManager,
) -> None:
self._window = window
self._configs = configs
self._sessions = WeakSet() # type: WeakSet[Session]
self._workspace = workspace
self._pending_listeners = deque() # type: Deque[AbstractViewListener]
self._listeners = WeakSet() # type: WeakSet[AbstractViewListener]
self._new_listener = None # type: Optional[AbstractViewListener]
self._new_session = None # type: Optional[Session]
self._diagnostic_phantom_set = None # type: Optional[sublime.PhantomSet]
self._panel_code_phantoms = None # type: Optional[sublime.PhantomSet]
self.total_error_count = 0
self.total_warning_count = 0
sublime.set_timeout(functools.partial(self._update_panel_main_thread, _NO_DIAGNOSTICS_PLACEHOLDER, []))
def get_config_manager(self) -> WindowConfigManager:
return self._configs
def on_load_project_async(self) -> None:
self.update_workspace_folders_async()
self._configs.update()
def on_post_save_project_async(self) -> None:
self.on_load_project_async()
def update_workspace_folders_async(self) -> None:
if self._workspace.update():
workspace_folders = self._workspace.get_workspace_folders()
for session in self._sessions:
session.update_folders(workspace_folders)
def enable_config_async(self, config_name: str) -> None:
self._configs.enable_config(config_name)
def disable_config_async(self, config_name: str) -> None:
self._configs.disable_config(config_name)
def open_location_async(
self,
location: Location,
session_name: Optional[str],
view: sublime.View,
flags: int = 0,
group: int = -1
) -> Promise[bool]:
for session in self.sessions(view):
if session_name is None or session_name == session.config.name:
return session.open_location_async(location, flags, group)
return Promise.resolve(False)
def register_listener_async(self, listener: AbstractViewListener) -> None:
set_diagnostics_count(listener.view, self.total_error_count, self.total_warning_count)
        # Update workspace folders in case the user has changed them since the window was created.
        # There is currently no notification in ST about folder changes.
self.update_workspace_folders_async()
self._pending_listeners.appendleft(listener)
if self._new_listener is None:
self._dequeue_listener_async()
def unregister_listener_async(self, listener: AbstractViewListener) -> None:
self._listeners.discard(listener)
def listeners(self) -> Generator[AbstractViewListener, None, None]:
yield from self._listeners
def listener_for_view(self, view: sublime.View) -> Optional[AbstractViewListener]:
for listener in self.listeners():
if listener.view == view:
return listener
return None
def _dequeue_listener_async(self) -> None:
listener = None # type: Optional[AbstractViewListener]
if self._new_listener is not None:
listener = self._new_listener
# debug("re-checking listener", listener)
self._new_listener = None
else:
try:
listener = self._pending_listeners.pop()
if not listener.view.is_valid():
# debug("listener", listener, "is no longer valid")
return self._dequeue_listener_async()
# debug("adding new pending listener", listener)
self._listeners.add(listener)
except IndexError:
# We have handled all pending listeners.
self._new_session = None
return
if self._new_session:
self._sessions.add(self._new_session)
self._publish_sessions_to_listener_async(listener)
if self._new_session:
if not any(self._new_session.session_views_async()):
self._sessions.discard(self._new_session)
self._new_session.end_async()
self._new_session = None
config = self._needed_config(listener.view)
if config:
# debug("found new config for listener", listener)
self._new_listener = listener
self.start_async(config, listener.view)
else:
# debug("no new config found for listener", listener)
self._new_listener = None
self._dequeue_listener_async()
def _publish_sessions_to_listener_async(self, listener: AbstractViewListener) -> None:
inside_workspace = self._workspace.contains(listener.view)
scheme = urllib.parse.urlparse(listener.get_uri()).scheme
for session in self._sessions:
if session.can_handle(listener.view, scheme, capability=None, inside_workspace=inside_workspace):
# debug("registering session", session.config.name, "to listener", listener)
try:
listener.on_session_initialized_async(session)
except Exception as ex:
message = "failed to register session {} to listener {}".format(session.config.name, listener)
exception_log(message, ex)
def window(self) -> sublime.Window:
return self._window
def sessions(self, view: sublime.View, capability: Optional[str] = None) -> Generator[Session, None, None]:
inside_workspace = self._workspace.contains(view)
sessions = list(self._sessions)
uri = view.settings().get("lsp_uri")
if not isinstance(uri, str):
return
scheme = urllib.parse.urlparse(uri).scheme
for session in sessions:
if session.can_handle(view, scheme, capability, inside_workspace):
yield session
def get_session(self, config_name: str, file_path: str) -> Optional[Session]:
return self._find_session(config_name, file_path)
def _can_start_config(self, config_name: str, file_path: str) -> bool:
return not bool(self._find_session(config_name, file_path))
def _find_session(self, config_name: str, file_path: str) -> Optional[Session]:
inside = self._workspace.contains(file_path)
for session in self._sessions:
if session.config.name == config_name and session.handles_path(file_path, inside):
return session
return None
def _needed_config(self, view: sublime.View) -> Optional[ClientConfig]:
configs = self._configs.match_view(view)
handled = False
file_name = view.file_name()
inside = self._workspace.contains(view)
for config in configs:
handled = False
for session in self._sessions:
if config.name == session.config.name and session.handles_path(file_name, inside):
handled = True
break
if not handled:
return config
return None
def start_async(self, config: ClientConfig, initiating_view: sublime.View) -> None:
config = ClientConfig.from_config(config, {})
file_path = initiating_view.file_name() or ''
if not self._can_start_config(config.name, file_path):
# debug('Already starting on this window:', config.name)
return
try:
workspace_folders = sorted_workspace_folders(self._workspace.folders, file_path)
plugin_class = get_plugin(config.name)
variables = extract_variables(self._window)
cwd = None # type: Optional[str]
if plugin_class is not None:
if plugin_class.needs_update_or_installation():
config.set_view_status(initiating_view, "installing...")
plugin_class.install_or_update()
additional_variables = plugin_class.additional_variables()
if isinstance(additional_variables, dict):
variables.update(additional_variables)
cannot_start_reason = plugin_class.can_start(self._window, initiating_view, workspace_folders, config)
if cannot_start_reason:
config.erase_view_status(initiating_view)
message = "cannot start {}: {}".format(config.name, cannot_start_reason)
self._configs.disable_config(config.name, only_for_session=True)
# Continue with handling pending listeners
self._new_session = None
sublime.set_timeout_async(self._dequeue_listener_async)
return self._window.status_message(message)
cwd = plugin_class.on_pre_start(self._window, initiating_view, workspace_folders, config)
config.set_view_status(initiating_view, "starting...")
session = Session(self, self._create_logger(config.name), workspace_folders, config, plugin_class)
if cwd:
transport_cwd = cwd # type: Optional[str]
else:
transport_cwd = workspace_folders[0].path if workspace_folders else None
transport_config = config.resolve_transport_config(variables)
transport = create_transport(transport_config, transport_cwd, session)
if plugin_class:
plugin_class.on_post_start(self._window, initiating_view, workspace_folders, config)
config.set_view_status(initiating_view, "initialize")
session.initialize_async(
variables=variables,
transport=transport,
working_directory=cwd,
init_callback=functools.partial(self._on_post_session_initialize, initiating_view)
)
self._new_session = session
except Exception as e:
message = "".join((
"Failed to start {0} - disabling for this window for the duration of the current session.\n",
"Re-enable by running \"LSP: Enable Language Server In Project\" from the Command Palette.",
"\n\n--- Error: ---\n{1}"
)).format(config.name, str(e))
exception_log("Unable to start subprocess for {}".format(config.name), e)
if isinstance(e, CalledProcessError):
print("Server output:\n{}".format(e.output.decode('utf-8', 'replace')))
self._configs.disable_config(config.name, only_for_session=True)
config.erase_view_status(initiating_view)
sublime.message_dialog(message)
# Continue with handling pending listeners
self._new_session = None
sublime.set_timeout_async(self._dequeue_listener_async)
def _on_post_session_initialize(
self, initiating_view: sublime.View, session: Session, is_error: bool = False
) -> None:
if is_error:
session.config.erase_view_status(initiating_view)
self._new_listener = None
self._new_session = None
else:
sublime.set_timeout_async(self._dequeue_listener_async)
def _create_logger(self, config_name: str) -> Logger:
logger_map = {
"panel": PanelLogger,
"remote": RemoteLogger,
}
loggers = []
for logger_type in userprefs().log_server:
if logger_type not in logger_map:
debug("Invalid logger type ({}) specified for log_server settings".format(logger_type))
continue
loggers.append(logger_map[logger_type])
if len(loggers) == 0:
return RouterLogger() # logs nothing
elif len(loggers) == 1:
return loggers[0](self, config_name)
else:
router_logger = RouterLogger()
for logger in loggers:
router_logger.append(logger(self, config_name))
return router_logger
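    # Note (added for clarity, not part of the original source): with the LSP setting
    # "log_server": ["panel", "remote"], this method returns a RouterLogger that fans
    # every log call out to both a PanelLogger and a RemoteLogger (defined below).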
def handle_message_request(self, session: Session, params: Any, request_id: Any) -> None:
view = self._window.active_view()
if view:
MessageRequestHandler(view, session, request_id, params, session.config.name).show()
def restart_sessions_async(self) -> None:
self._end_sessions_async()
listeners = list(self._listeners)
self._listeners.clear()
for listener in listeners:
self.register_listener_async(listener)
def _end_sessions_async(self) -> None:
for session in self._sessions:
session.end_async()
self._sessions.clear()
def end_config_sessions_async(self, config_name: str) -> None:
sessions = list(self._sessions)
for session in sessions:
if session.config.name == config_name:
session.end_async()
self._sessions.discard(session)
def get_project_path(self, file_path: str) -> Optional[str]:
candidate = None # type: Optional[str]
for folder in self._workspace.folders:
if file_path.startswith(folder):
if candidate is None or len(folder) > len(candidate):
candidate = folder
return candidate
def should_present_diagnostics(self, uri: DocumentUri) -> Optional[str]:
scheme, path = parse_uri(uri)
if scheme != "file":
return None
if not self._workspace.contains(path):
return "not inside window folders"
view = self._window.active_view()
if not view:
return None
settings = view.settings()
if matches_pattern(path, settings.get("binary_file_patterns")):
return "matches a pattern in binary_file_patterns"
if matches_pattern(path, settings.get("file_exclude_patterns")):
return "matches a pattern in file_exclude_patterns"
if matches_pattern(path, settings.get("folder_exclude_patterns")):
return "matches a pattern in folder_exclude_patterns"
return None
def on_post_exit_async(self, session: Session, exit_code: int, exception: Optional[Exception]) -> None:
self._sessions.discard(session)
for listener in self._listeners:
listener.on_session_shutdown_async(session)
if exit_code != 0 or exception:
config = session.config
msg = "".join((
"{0} exited with status code {1}. ",
"Do you want to restart it? If you choose Cancel, it will be disabled for this window for the ",
"duration of the current session. ",
"Re-enable by running \"LSP: Enable Language Server In Project\" from the Command Palette."
)).format(config.name, exit_code)
if exception:
msg += "\n\n--- Error: ---\n{}".format(str(exception))
if sublime.ok_cancel_dialog(msg, "Restart {}".format(config.name)):
for listener in self._listeners:
self.register_listener_async(listener)
else:
self._configs.disable_config(config.name, only_for_session=True)
def plugin_unloaded(self) -> None:
"""
This is called **from the main thread** when the plugin unloads. In that case we must destroy all sessions
from the main thread. That could lead to some dict/list being mutated while iterated over, so be careful
"""
self._end_sessions_async()
def handle_server_message(self, server_name: str, message: str) -> None:
sublime.set_timeout(lambda: log_server_message(self._window, server_name, message))
def handle_log_message(self, session: Session, params: Any) -> None:
self.handle_server_message(session.config.name, extract_message(params))
def handle_stderr_log(self, session: Session, message: str) -> None:
self.handle_server_message(session.config.name, message)
def handle_show_message(self, session: Session, params: Any) -> None:
sublime.status_message("{}: {}".format(session.config.name, extract_message(params)))
def update_diagnostics_panel_async(self) -> None:
to_render = [] # type: List[str]
self.total_error_count = 0
self.total_warning_count = 0
listeners = list(self._listeners)
prephantoms = [] # type: List[Tuple[int, int, str, str]]
row = 0
contributions = OrderedDict(
) # type: OrderedDict[str, List[Tuple[str, Optional[int], Optional[str], Optional[str]]]]
for session in self._sessions:
local_errors, local_warnings = session.diagnostics_manager.sum_total_errors_and_warnings_async()
self.total_error_count += local_errors
self.total_warning_count += local_warnings
for path, contribution in session.diagnostics_manager.diagnostics_panel_contributions_async():
seen = path in contributions
contributions.setdefault(path, []).extend(contribution)
if not seen:
contributions.move_to_end(path)
for path, contribution in contributions.items():
to_render.append("{}:".format(path))
row += 1
for content, offset, code, href in contribution:
to_render.append(content)
if offset is not None and code is not None and href is not None:
prephantoms.append((row, offset, code, href))
row += content.count("\n") + 1
to_render.append("") # add spacing between filenames
row += 1
for listener in listeners:
set_diagnostics_count(listener.view, self.total_error_count, self.total_warning_count)
characters = "\n".join(to_render)
if not characters:
characters = _NO_DIAGNOSTICS_PLACEHOLDER
sublime.set_timeout(functools.partial(self._update_panel_main_thread, characters, prephantoms))
def _update_panel_main_thread(self, characters: str, prephantoms: List[Tuple[int, int, str, str]]) -> None:
panel = ensure_diagnostics_panel(self._window)
if not panel or not panel.is_valid():
return
panel.run_command("lsp_update_panel", {"characters": characters})
if self._panel_code_phantoms is None:
self._panel_code_phantoms = sublime.PhantomSet(panel, "hrefs")
phantoms = [] # type: List[sublime.Phantom]
for row, col, code, href in prephantoms:
point = panel.text_point(row, col)
region = sublime.Region(point, point)
phantoms.append(sublime.Phantom(region, make_link(href, code), sublime.LAYOUT_INLINE))
self._panel_code_phantoms.update(phantoms)
def show_diagnostics_panel_async(self) -> None:
if self._window.active_panel() is None:
self._window.run_command("show_panel", {"panel": "output.diagnostics"})
class WindowRegistry(object):
def __init__(self, configs: ConfigManager) -> None:
self._windows = {} # type: Dict[int, WindowManager]
self._configs = configs
def lookup(self, window: sublime.Window) -> WindowManager:
wm = self._windows.get(window.id())
if wm:
return wm
workspace = ProjectFolders(window)
window_configs = self._configs.for_window(window)
state = WindowManager(window=window, workspace=workspace, configs=window_configs)
self._windows[window.id()] = state
return state
def listener_for_view(self, view: sublime.View) -> Optional[AbstractViewListener]:
w = view.window()
if not w:
return None
return self.lookup(w).listener_for_view(view)
def discard(self, window: sublime.Window) -> None:
self._windows.pop(window.id(), None)
class PanelLogger(Logger):
def __init__(self, manager: WindowManager, server_name: str) -> None:
self._manager = ref(manager)
self._server_name = server_name
def stderr_message(self, message: str) -> None:
"""
        Not handled here as stderr messages are handled by WindowManager regardless of
        whether this logger is enabled.
"""
pass
def log(self, message: str, params: Any) -> None:
def run_on_async_worker_thread() -> None:
nonlocal message
params_str = str(params)
if 0 < userprefs().log_max_size <= len(params_str):
params_str = '<params with {} characters>'.format(len(params_str))
message = "{}: {}".format(message, params_str)
manager = self._manager()
if manager is not None:
manager.handle_server_message(":", message)
sublime.set_timeout_async(run_on_async_worker_thread)
def outgoing_response(self, request_id: Any, params: Any) -> None:
if not userprefs().log_server:
return
self.log(self._format_response(">>>", request_id), params)
def outgoing_error_response(self, request_id: Any, error: Error) -> None:
if not userprefs().log_server:
return
self.log(self._format_response("~~>", request_id), error.to_lsp())
def outgoing_request(self, request_id: int, method: str, params: Any) -> None:
if not userprefs().log_server:
return
self.log(self._format_request("-->", method, request_id), params)
def outgoing_notification(self, method: str, params: Any) -> None:
if not userprefs().log_server:
return
self.log(self._format_notification(" ->", method), params)
def incoming_response(self, request_id: int, params: Any, is_error: bool) -> None:
if not userprefs().log_server:
return
direction = "<~~" if is_error else "<<<"
self.log(self._format_response(direction, request_id), params)
def incoming_request(self, request_id: Any, method: str, params: Any) -> None:
if not userprefs().log_server:
return
self.log(self._format_request("<--", method, request_id), params)
def incoming_notification(self, method: str, params: Any, unhandled: bool) -> None:
if not userprefs().log_server:
return
direction = "<? " if unhandled else "<- "
self.log(self._format_notification(direction, method), params)
def _format_response(self, direction: str, request_id: Any) -> str:
return "{} {} {}".format(direction, self._server_name, request_id)
def _format_request(self, direction: str, method: str, request_id: Any) -> str:
return "{} {} {}({})".format(direction, self._server_name, method, request_id)
def _format_notification(self, direction: str, method: str) -> str:
return "{} {} {}".format(direction, self._server_name, method)
class RemoteLogger(Logger):
PORT = 9981
DIRECTION_OUTGOING = 1
DIRECTION_INCOMING = 2
_ws_server = None # type: Optional[WebsocketServer]
_ws_server_thread = None # type: Optional[threading.Thread]
_last_id = 0
def __init__(self, manager: WindowManager, server_name: str) -> None:
RemoteLogger._last_id += 1
self._server_name = '{} ({})'.format(server_name, RemoteLogger._last_id)
if not RemoteLogger._ws_server:
try:
RemoteLogger._ws_server = WebsocketServer(self.PORT)
RemoteLogger._ws_server.set_fn_new_client(self._on_new_client)
RemoteLogger._ws_server.set_fn_client_left(self._on_client_left)
RemoteLogger._ws_server.set_fn_message_received(self._on_message_received)
self._start_server()
except OSError as ex:
if ex.errno == 48: # Address already in use
debug('WebsocketServer not started - address already in use')
RemoteLogger._ws_server = None
else:
raise ex
def _start_server(self) -> None:
def start_async() -> None:
if RemoteLogger._ws_server:
RemoteLogger._ws_server.run_forever()
RemoteLogger._ws_server_thread = threading.Thread(target=start_async)
RemoteLogger._ws_server_thread.start()
def _stop_server(self) -> None:
if RemoteLogger._ws_server:
RemoteLogger._ws_server.shutdown()
RemoteLogger._ws_server = None
if RemoteLogger._ws_server_thread:
RemoteLogger._ws_server_thread.join()
RemoteLogger._ws_server_thread = None
def _on_new_client(self, client: Dict, server: WebsocketServer) -> None:
"""Called for every client connecting (after handshake)."""
debug("New client connected and was given id %d" % client['id'])
# server.send_message_to_all("Hey all, a new client has joined us")
def _on_client_left(self, client: Dict, server: WebsocketServer) -> None:
"""Called for every client disconnecting."""
debug("Client(%d) disconnected" % client['id'])
def _on_message_received(self, client: Dict, server: WebsocketServer, message: str) -> None:
"""Called when a client sends a message."""
debug("Client(%d) said: %s" % (client['id'], message))
def stderr_message(self, message: str) -> None:
self._broadcast_json({
'server': self._server_name,
'time': round(time() * 1000),
'method': 'stderr',
'params': message,
'isError': True,
'direction': self.DIRECTION_INCOMING,
})
def outgoing_request(self, request_id: int, method: str, params: Any) -> None:
self._broadcast_json({
'server': self._server_name,
'id': request_id,
'time': round(time() * 1000),
'method': method,
'params': params,
'direction': self.DIRECTION_OUTGOING,
})
def incoming_response(self, request_id: int, params: Any, is_error: bool) -> None:
self._broadcast_json({
'server': self._server_name,
'id': request_id,
'time': round(time() * 1000),
'params': params,
'direction': self.DIRECTION_INCOMING,
'isError': is_error,
})
def incoming_request(self, request_id: Any, method: str, params: Any) -> None:
self._broadcast_json({
'server': self._server_name,
'id': request_id,
'time': round(time() * 1000),
'method': method,
'params': params,
'direction': self.DIRECTION_INCOMING,
})
def outgoing_response(self, request_id: Any, params: Any) -> None:
self._broadcast_json({
'server': self._server_name,
'id': request_id,
'time': round(time() * 1000),
'params': params,
'direction': self.DIRECTION_OUTGOING,
})
def outgoing_error_response(self, request_id: Any, error: Error) -> None:
self._broadcast_json({
'server': self._server_name,
'id': request_id,
'isError': True,
'params': error.to_lsp(),
'time': round(time() * 1000),
'direction': self.DIRECTION_OUTGOING,
})
def outgoing_notification(self, method: str, params: Any) -> None:
self._broadcast_json({
'server': self._server_name,
'time': round(time() * 1000),
'method': method,
'params': params,
'direction': self.DIRECTION_OUTGOING,
})
def incoming_notification(self, method: str, params: Any, unhandled: bool) -> None:
self._broadcast_json({
'server': self._server_name,
'time': round(time() * 1000),
'error': 'Unhandled notification!' if unhandled else None,
'method': method,
'params': params,
'direction': self.DIRECTION_INCOMING,
})
def _broadcast_json(self, data: Dict[str, Any]) -> None:
if RemoteLogger._ws_server:
json_data = json.dumps(data, sort_keys=True, check_circular=False, separators=(',', ':'))
RemoteLogger._ws_server.send_message_to_all(json_data)
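    # Note (added for clarity, not part of the original source): any websocket client
    # connected to port RemoteLogger.PORT (9981, typically on localhost) receives each
    # log entry as a compact JSON object with "server", "time", "direction" and the
    # message payload fields built above.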
class RouterLogger(Logger):
def __init__(self) -> None:
self._loggers = [] # type: List[Logger]
def append(self, logger: Logger) -> None:
self._loggers.append(logger)
def stderr_message(self, *args: Any, **kwargs: Any) -> None:
self._foreach("stderr_message", *args, **kwargs)
def outgoing_response(self, *args: Any, **kwargs: Any) -> None:
self._foreach("outgoing_response", *args, **kwargs)
def outgoing_error_response(self, *args: Any, **kwargs: Any) -> None:
self._foreach("outgoing_error_response", *args, **kwargs)
def outgoing_request(self, *args: Any, **kwargs: Any) -> None:
self._foreach("outgoing_request", *args, **kwargs)
def outgoing_notification(self, *args: Any, **kwargs: Any) -> None:
self._foreach("outgoing_notification", *args, **kwargs)
def incoming_response(self, *args: Any, **kwargs: Any) -> None:
self._foreach("incoming_response", *args, **kwargs)
def incoming_request(self, *args: Any, **kwargs: Any) -> None:
self._foreach("incoming_request", *args, **kwargs)
def incoming_notification(self, *args: Any, **kwargs: Any) -> None:
self._foreach("incoming_notification", *args, **kwargs)
def _foreach(self, method: str, *args: Any, **kwargs: Any) -> None:
for logger in self._loggers:
getattr(logger, method)(*args, **kwargs)
|
render.py
|
import multiprocessing as mp
import os
from .elements.frame import Frame
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
class Renderer:
count = -1
def __init__(self, timeline, preview=True, clearall=False):
self.timeline = timeline
self.width = 640 if preview else 1920
self.height = 360 if preview else 1080
self.preview = preview
Renderer.count += 1
chrome_options = Options()
chrome_options.add_argument("--headless")
self.drivers = [webdriver.Chrome(chrome_options=chrome_options) for _ in range(4)]
# set viewport to be the image size
for i in range(len(self.drivers)):
window_size = self.drivers[i].execute_script(
"""
return [window.outerWidth - window.innerWidth + arguments[0],
window.outerHeight - window.innerHeight + arguments[1]];
""", self.width, self.height)
self.drivers[i].set_window_size(*window_size)
self.cwd = os.getcwd()
if clearall:
os.system(
"mkdir -p videos && rm -rf videos/* "
"&& mkdir -p images/svgs && rm -rf images/svgs/* "
"&& mkdir -p images/pngs && rm -rf images/pngs/* "
)
os.system(f"mkdir -p images/svgs/{Renderer.count} && mkdir -p images/pngs/{Renderer.count}")
def render_svgs(self):
frame_number = 0
processes = []
while frame_number <= self.timeline._lifetime:
print(
f"creating frame: {frame_number} | completed: {(frame_number / self.timeline._lifetime) * 100:.2f}%",
end="\r")
frame = Frame(self.width, self.height)
self.timeline.exec(frame_number, frame)
p = mp.Process(target=frame.save, args=(f"images/svgs/{Renderer.count}/{frame_number}.svg",))
processes.append(p)
p.start()
if len(processes) == 64:
for p in processes:
p.join()
processes.clear()
for element in frame.elements.values():
element.dynamic_reset()
frame_number += 1
for p in processes:
p.join()
processes.clear()
def render_pngs(self, remove_svgs=False):
if len(os.listdir(f"images/svgs/{Renderer.count}")) == 0:
self.render_svgs()
q = mp.Queue(maxsize=len(self.drivers))
processes = []
for i in range(len(self.drivers)):
q.put(i)
frame_number = 0
while frame_number < len(os.listdir(f"images/svgs/{Renderer.count}")):
if not q.empty():
p = mp.Process(target=self._save_png, args=(frame_number, q))
processes.append(p)
p.start()
frame_number += 1
if len(processes) == 128:
for p in processes:
p.join()
processes.clear()
for p in processes:
p.join()
processes.clear()
for i in range(len(self.drivers)):
self.drivers[i].quit()
if remove_svgs:
os.system(f"rm -rf images/svgs/{Renderer.count}")
def render_video(self, filename="mov", lossless=False, remove_images=False):
if len(os.listdir(f"images/pngs/{Renderer.count}")) == 0:
self.render_pngs()
if lossless:
os.system(f"cd images/pngs/{Renderer.count} && "
f"ffmpeg -framerate {self.timeline.fps} "
f"-i %d.png -c:v copy {filename}.mkv && "
f"mv {filename}.mkv ../../../videos/")
else:
os.system(" ".join([f"cd images/pngs/{Renderer.count} &&",
"ffmpeg",
"-y",
"-vcodec", "png",
"-framerate", f"{self.timeline.fps}",
"-s", f"{self.width}x{self.height}",
"-i", "%d.png",
"-c:v", "libx264",
"-crf", "0" if self.preview else "17",
"-preset", "ultrafast" if self.preview else "slower",
"-pix_fmt", "yuv420p",
"-an",
"-tune", "animation",
f"{filename}.mp4",
f"&& mv {filename}.mp4 ../../../videos/"]))
if remove_images:
print("ok")
os.system(f"rm -rf images/svgs/{Renderer.count} && rm -rf images/pngs/{Renderer.count}")
def _save_png(self, frame_number, q):
i = q.get()
self.drivers[i].get("file://" + self.cwd + f"/images/svgs/{Renderer.count}/{frame_number}.svg")
self.drivers[i].save_screenshot(self.cwd + f"/images/pngs/{Renderer.count}/{frame_number}.png")
q.put(i)
    def __del__(self):
for i in range(len(self.drivers)):
self.drivers[i].quit()
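    # Illustrative usage (a hedged sketch; `timeline` is assumed to be an object from
    # this package exposing `_lifetime`, `fps` and `exec(frame_number, frame)`):
    #   renderer = Renderer(timeline, preview=True, clearall=True)
    #   renderer.render_video(filename="scene", remove_images=True)
    # render_video() falls back to render_pngs(), which in turn falls back to
    # render_svgs(), whenever the intermediate images are missing.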
# " DRI_PRIME=1 parallel convert '{} {.}.bmp' ::: *.svg &&"
# " mv *.bmp ../bmps &&"
# " cd ../bmps &&"
# "-vb", "20M",\
# "-start_number", "0",\
# "-i", "-",\
# "-bf", "2",\
# "-g", "30",\
# "-an",
# "-use_editlist", "0",\
# "-movflags", "+faststart",\
# "-profile:v", "high",\
# "-tune", "animation",\
# "-crf", "18",\
# "-preset", "ultrafast" if preview else "slower",\
# "-frames:v", f"{number_of_svgs}",\
# "-c:a","aac",\
# "-q:a","1",\
# "-ac","2",\
# "-ar","48000",\
# parallel convert '{} {.}.png' ::: *.svg
# def concat_partials(self):
# if self.bool_combine_partial_renders:
# mov_files = []
# for mov in sorted(os.listdir("mov"), key=lambda x: int(x[:-4])):
# mov_files.append(f"file '{mov}'")
# with open("mov/movlist.txt", "w") as f:
# f.write("\n".join(mov_files))
# os.system(" ".join(["cd mov &&", self.ffmpeg_bin,
# "-safe", "0",\
# "-f", "concat",\
# "-i", "movlist.txt",\
# "-c", "copy",\
# "-loglevel", "error",\
# "mov.mp4"]))
#
# if self.bool_save_video:
# self.p = Popen((["ffmpeg",
# "-y",
# "-f", "image2pipe",\
# "-vcodec", "png",\
# "-framerate", f"{self.timeline.fps}",\
# "-s", f"{self.width}x{self.height}",\
# "-i", "%d.png",\
# "-c:v", "libx264",\
# "-crf", "0", \
# "-preset", "ultrafast" if preview else "slower", \
# "-pix_fmt", "yuv420p",\
# "-an",
# "-tune", "animation",\
# "-loglevel", "error",\
# f"{filename}.mp4"]), stdin=PIPE)
|
multidownloadXkcd.py
|
#!python3
# multidownloadXkcd.py - Downloads XKCD comics using multiple threads
import requests, os, bs4, threading
os.makedirs('xkcd', exist_ok=True) # Store comics in ./xkcd
def downloadXkcd(startComic, endComic):
for urlNumber in range(startComic, endComic):
# Download the page
print('Downloading page http://xkcd.com/%s' % (urlNumber))
res = requests.get('http://xkcd.com/%s' % (urlNumber))
res.raise_for_status()
        soup = bs4.BeautifulSoup(res.text, 'html.parser')
# Find the URL of the comic image.
comicElem = soup.select('#comic img')
if comicElem == []:
print('Could not find comic image.')
else:
comicUrl = 'http:%s' % (comicElem[0].get('src'))
# Download the image
print('Downloading image %s...' % (comicUrl))
res = requests.get(comicUrl)
res.raise_for_status()
# Save the image to ./xkcd.
imageFile = open(os.path.join('xkcd', os.path.basename(comicUrl)), 'wb')
for chunk in res.iter_content(100_000):
imageFile.write(chunk)
imageFile.close()
# Create and start the Thread objects.
downloadThreads = []
for i in range(0, 1400, 100):
downloadThread = threading.Thread(target=downloadXkcd, args=(i, i+99))
downloadThreads.append(downloadThread)
downloadThread.start()
# Wait for all threads to end.
for downloadThread in downloadThreads:
downloadThread.join()
print('Done.')
|
tb_device_mqtt.py
|
# Copyright 2020. ThingsBoard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import queue
import ssl
import time
from simplejson import loads, dumps
from threading import RLock
from threading import Thread
from thingsboard_gateway.tb_utility.tb_utility import TBUtility
import paho.mqtt.client as paho
from jsonschema import Draft7Validator
from jsonschema import ValidationError
KV_SCHEMA = {
"type": "object",
"patternProperties":
{
".": {"type": ["integer",
"string",
"boolean",
"number"]}
},
"minProperties": 1,
}
SCHEMA_FOR_CLIENT_RPC = {
"type": "object",
"patternProperties":
{
".": {"type": ["integer",
"string",
"boolean",
"number"]}
},
"minProperties": 0,
}
TS_KV_SCHEMA = {
"type": "object",
"properties": {
"ts": {
"type": ["integer"]
},
"values": KV_SCHEMA
},
"additionalProperties": False
}
DEVICE_TS_KV_SCHEMA = {
"type": "array",
"items": TS_KV_SCHEMA
}
DEVICE_TS_OR_KV_SCHEMA = {
"type": "array",
"items": {
"anyOf":
[
TS_KV_SCHEMA,
KV_SCHEMA
]
}
}
RPC_VALIDATOR = Draft7Validator(SCHEMA_FOR_CLIENT_RPC)
KV_VALIDATOR = Draft7Validator(KV_SCHEMA)
TS_KV_VALIDATOR = Draft7Validator(TS_KV_SCHEMA)
DEVICE_TS_KV_VALIDATOR = Draft7Validator(DEVICE_TS_KV_SCHEMA)
DEVICE_TS_OR_KV_VALIDATOR = Draft7Validator(DEVICE_TS_OR_KV_SCHEMA)
RPC_RESPONSE_TOPIC = 'v1/devices/me/rpc/response/'
RPC_REQUEST_TOPIC = 'v1/devices/me/rpc/request/'
ATTRIBUTES_TOPIC = 'v1/devices/me/attributes'
ATTRIBUTES_TOPIC_REQUEST = 'v1/devices/me/attributes/request/'
ATTRIBUTES_TOPIC_RESPONSE = 'v1/devices/me/attributes/response/'
TELEMETRY_TOPIC = 'v1/devices/me/telemetry'
log = logging.getLogger("tb_connection")
log.setLevel(logging.DEBUG)
class TBTimeoutException(Exception):
pass
class TBQoSException(Exception):
pass
class TBPublishInfo:
TB_ERR_AGAIN = -1
TB_ERR_SUCCESS = 0
TB_ERR_NOMEM = 1
TB_ERR_PROTOCOL = 2
TB_ERR_INVAL = 3
TB_ERR_NO_CONN = 4
TB_ERR_CONN_REFUSED = 5
TB_ERR_NOT_FOUND = 6
TB_ERR_CONN_LOST = 7
TB_ERR_TLS = 8
TB_ERR_PAYLOAD_SIZE = 9
TB_ERR_NOT_SUPPORTED = 10
TB_ERR_AUTH = 11
TB_ERR_ACL_DENIED = 12
TB_ERR_UNKNOWN = 13
TB_ERR_ERRNO = 14
TB_ERR_QUEUE_SIZE = 15
def __init__(self, message_info):
self.message_info = message_info
def rc(self):
return self.message_info.rc
def mid(self):
return self.message_info.mid
def get(self):
self.message_info.wait_for_publish()
return self.message_info.rc
class TBDeviceMqttClient:
def __init__(self, host, port=1883, token=None):
self._client = paho.Client()
self.__host = host
self.__port = port
if token == "":
log.warning("token is not set, connection without tls wont be established")
else:
self._client.username_pw_set(token)
self._lock = RLock()
self._attr_request_dict = {}
self.__timeout_queue = queue.Queue()
self.__timeout_thread = Thread(target=self.__timeout_check)
self.__timeout_thread.daemon = True
self.__timeout_thread.start()
self.__is_connected = False
self.__device_on_server_side_rpc_response = None
self.__connect_callback = None
self.__device_max_sub_id = 0
self.__device_client_rpc_number = 0
self.__device_sub_dict = {}
self.__device_client_rpc_dict = {}
self.__attr_request_number = 0
self._client.on_connect = self._on_connect
self._client.on_log = self._on_log
self._client.on_publish = self._on_publish
self._client.on_message = self._on_message
self._client.on_disconnect = self._on_disconnect
def _on_log(self, client, userdata, level, buf):
log.exception(buf)
def _on_publish(self, client, userdata, result):
# log.debug("Data published to ThingsBoard!")
pass
def _on_disconnect(self, client, userdata, rc):
log.debug(client)
log.debug("Disconnected")
def _on_connect(self, client, userdata, flags, rc, *extra_params):
result_codes = {
1: "incorrect protocol version",
2: "invalid client identifier",
3: "server unavailable",
4: "bad username or password",
5: "not authorised",
}
if self.__connect_callback:
self.__connect_callback(client, userdata, flags, rc, *extra_params)
if rc == 0:
self.__is_connected = True
log.info("connection SUCCESS")
log.debug(client)
self._client.subscribe(ATTRIBUTES_TOPIC, qos=1)
self._client.subscribe(ATTRIBUTES_TOPIC + "/response/+", 1)
self._client.subscribe(RPC_REQUEST_TOPIC + '+')
self._client.subscribe(RPC_RESPONSE_TOPIC + '+', qos=1)
else:
if rc in result_codes:
log.error("connection FAIL with error {rc} {explanation}".format(rc=rc,
explanation=result_codes[rc]))
else:
log.error("connection FAIL with unknown error")
def is_connected(self):
return self.__is_connected
def connect(self, callback=None, min_reconnect_delay=1, timeout=120, tls=False, ca_certs=None, cert_file=None, key_file=None, keepalive=60):
if tls:
self._client.tls_set(ca_certs=ca_certs,
certfile=cert_file,
keyfile=key_file,
cert_reqs=ssl.CERT_REQUIRED,
tls_version=ssl.PROTOCOL_TLSv1_2,
ciphers=None)
self._client.tls_insecure_set(False)
self._client.connect(self.__host, self.__port, keepalive=keepalive)
self.reconnect_delay_set(min_reconnect_delay, timeout)
self._client.loop_start()
self.__connect_callback = callback
def disconnect(self):
self._client.disconnect()
log.debug(self._client)
log.debug("Disconnecting from ThingsBoard")
self.__is_connected = False
def _on_message(self, client, userdata, message):
content = TBUtility.decode(message)
self._on_decoded_message(content, message)
@staticmethod
def validate(validator, data):
try:
validator.validate(data)
except ValidationError as e:
log.error(e)
raise e
def _on_decoded_message(self, content, message):
if message.topic.startswith(RPC_REQUEST_TOPIC):
request_id = message.topic[len(RPC_REQUEST_TOPIC):len(message.topic)]
if self.__device_on_server_side_rpc_response:
self.__device_on_server_side_rpc_response(request_id, content)
elif message.topic.startswith(RPC_RESPONSE_TOPIC):
with self._lock:
request_id = int(message.topic[len(RPC_RESPONSE_TOPIC):len(message.topic)])
callback = self.__device_client_rpc_dict.pop(request_id)
callback(request_id, content, None)
elif message.topic == ATTRIBUTES_TOPIC:
dict_results = []
with self._lock:
# callbacks for everything
if self.__device_sub_dict.get("*"):
for x in self.__device_sub_dict["*"]:
dict_results.append(self.__device_sub_dict["*"][x])
# specific callback
keys = content.keys()
keys_list = []
for key in keys:
keys_list.append(key)
# iterate through message
for key in keys_list:
# find key in our dict
if self.__device_sub_dict.get(key):
for x in self.__device_sub_dict[key]:
dict_results.append(self.__device_sub_dict[key][x])
for res in dict_results:
res(content, None)
elif message.topic.startswith(ATTRIBUTES_TOPIC_RESPONSE):
with self._lock:
req_id = int(message.topic[len(ATTRIBUTES_TOPIC+"/response/"):])
# pop callback and use it
callback = self._attr_request_dict.pop(req_id)
callback(content, None)
def max_inflight_messages_set(self, inflight):
"""Set the maximum number of messages with QoS>0 that can be part way through their network flow at once.
Defaults to 20. Increasing this value will consume more memory but can increase throughput."""
self._client.max_inflight_messages_set(inflight)
def max_queued_messages_set(self, queue_size):
"""Set the maximum number of outgoing messages with QoS>0 that can be pending in the outgoing message queue.
Defaults to 0. 0 means unlimited. When the queue is full, any further outgoing messages would be dropped."""
self._client.max_queued_messages_set(queue_size)
def reconnect_delay_set(self, min_delay=1, max_delay=120):
"""The client will automatically retry connection. Between each attempt it will wait a number of seconds
between min_delay and max_delay. When the connection is lost, initially the reconnection attempt is delayed
of min_delay seconds. It’s doubled between subsequent attempt up to max_delay. The delay is reset to min_delay
when the connection complete (e.g. the CONNACK is received, not just the TCP connection is established)."""
self._client.reconnect_delay_set(min_delay, max_delay)
def send_rpc_reply(self, req_id, resp, quality_of_service=1, wait_for_publish=False):
if quality_of_service != 0 and quality_of_service != 1:
log.error("Quality of service (qos) value must be 0 or 1")
return
info = self._client.publish(RPC_RESPONSE_TOPIC + req_id, resp, qos=quality_of_service)
if wait_for_publish:
info.wait_for_publish()
def send_rpc_call(self, method, params, callback):
self.validate(RPC_VALIDATOR, params)
with self._lock:
self.__device_client_rpc_number += 1
self.__device_client_rpc_dict.update({self.__device_client_rpc_number: callback})
rpc_request_id = self.__device_client_rpc_number
payload = {"method": method, "params": params}
self._client.publish(RPC_REQUEST_TOPIC + str(rpc_request_id),
dumps(payload),
qos=1)
def set_server_side_rpc_request_handler(self, handler):
self.__device_on_server_side_rpc_response = handler
def publish_data(self, data, topic, qos):
data = dumps(data)
if qos != 0 and qos != 1:
log.exception("Quality of service (qos) value must be 0 or 1")
raise TBQoSException("Quality of service (qos) value must be 0 or 1")
else:
return TBPublishInfo(self._client.publish(topic, data, qos))
def send_telemetry(self, telemetry, quality_of_service=1):
if type(telemetry) is not list:
telemetry = [telemetry]
self.validate(DEVICE_TS_OR_KV_VALIDATOR, telemetry)
return self.publish_data(telemetry, TELEMETRY_TOPIC, quality_of_service)
def send_attributes(self, attributes, quality_of_service=1):
self.validate(KV_VALIDATOR, attributes)
return self.publish_data(attributes, ATTRIBUTES_TOPIC, quality_of_service)
def unsubscribe_from_attribute(self, subscription_id):
with self._lock:
for x in self.__device_sub_dict:
if self.__device_sub_dict[x].get(subscription_id):
del self.__device_sub_dict[x][subscription_id]
log.debug("Unsubscribed from {attribute}, subscription id {sub_id}".format(attribute=x,
sub_id=subscription_id))
if subscription_id == '*':
self.__device_sub_dict = {}
            self.__device_sub_dict = dict((k, v) for k, v in self.__device_sub_dict.items() if v)
def subscribe_to_all_attributes(self, callback):
return self.subscribe_to_attribute("*", callback)
def subscribe_to_attribute(self, key, callback):
with self._lock:
self.__device_max_sub_id += 1
if key not in self.__device_sub_dict:
self.__device_sub_dict.update({key: {self.__device_max_sub_id: callback}})
else:
self.__device_sub_dict[key].update({self.__device_max_sub_id: callback})
log.debug("Subscribed to {key} with id {id}".format(key=key, id=self.__device_max_sub_id))
return self.__device_max_sub_id
def request_attributes(self, client_keys=None, shared_keys=None, callback=None):
msg = {}
if client_keys:
tmp = ""
for key in client_keys:
tmp += key + ","
tmp = tmp[:len(tmp) - 1]
msg.update({"clientKeys": tmp})
if shared_keys:
tmp = ""
for key in shared_keys:
tmp += key + ","
tmp = tmp[:len(tmp) - 1]
msg.update({"sharedKeys": tmp})
ts_in_millis = int(round(time.time() * 1000))
attr_request_number = self._add_attr_request_callback(callback)
        info = self._client.publish(topic=ATTRIBUTES_TOPIC_REQUEST + str(attr_request_number),
payload=dumps(msg),
qos=1)
self._add_timeout(attr_request_number, ts_in_millis + 30000)
return info
def _add_timeout(self, attr_request_number, ts):
self.__timeout_queue.put({"ts": ts, "attribute_request_id": attr_request_number})
def _add_attr_request_callback(self, callback):
with self._lock:
self.__attr_request_number += 1
self._attr_request_dict.update({self.__attr_request_number: callback})
attr_request_number = self.__attr_request_number
return attr_request_number
def __timeout_check(self):
while True:
try:
item = self.__timeout_queue.get()
if item is not None:
while True:
current_ts_in_millis = int(round(time.time() * 1000))
if current_ts_in_millis > item["ts"]:
break
else:
time.sleep(0.001)
with self._lock:
callback = None
if item.get("attribute_request_id"):
if self._attr_request_dict.get(item["attribute_request_id"]):
callback = self._attr_request_dict.pop(item["attribute_request_id"])
elif item.get("rpc_request_id"):
if self.__device_client_rpc_dict.get(item["rpc_request_id"]):
callback = self.__device_client_rpc_dict.pop(item["rpc_request_id"])
if callback is not None:
callback(None, TBTimeoutException("Timeout while waiting for reply from ThingsBoard!"))
else:
time.sleep(0.001)
except Exception as e:
log.warning(e)
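# Illustrative usage (a hedged sketch, not part of the original module; the host and
# token below are placeholders, not real credentials):
#   client = TBDeviceMqttClient("thingsboard.example.com", token="DEVICE_ACCESS_TOKEN")
#   client.connect()
#   client.send_telemetry({"temperature": 21.5}).get()  # waits until the publish completes
#   client.disconnect()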
|
1mtc_north.py
|
from __future__ import print_function
from pyfrac.utils import pyfraclogger
from pyfrac.control import keyboard
from pyfrac.acquire import capture
import multiprocessing
import atexit
import json
import pika
import time
import os
logger = pyfraclogger.pyfraclogger(tofile=True)
RPC_QUEUE_NAME = "1mtcNorth_ir_queue"
RPC_VHOST = "/ir"
IR_IMAGE_DIR = os.getenv('mtc_ir_dir')
NORTH_IR_IMG_DIR = os.path.join(IR_IMAGE_DIR, 'North', 'live')
NORTH_IRCAM_IP = os.getenv("north_ircam_ip")
NORTH_IRCAM_FTP_UNAME = os.getenv("north_ircam_ftp_uname")
NORTH_IRCAM_FTP_PASS = os.getenv("north_ircam_ftp_pass")
# String to insert in the filename
NORTH_LOC_STRING = "north"
def _initialize(cam_lock, capture_event, frames_captured,
count, recent_error, interval, capture_die):
"""
Setup the global events that will be used
to trigger the capture loop's different functions
in separate processes
Parameters:
----------
cam_lock: `multiprocessing.Lock`
For obtaining exclusive lock so that two
commands cannot be sent to the camera
simultaneously.
.. note: Camera's buffer overflows when it gets hit by
commands at more than 1Hz.
capture_event: `multiprocessing.Event`
This will be used to trigger the capture
start on the cam
frames_captured: `multiprocessing.Manager.Value`
This will be used to exchange the number of frames captured
within the capture loop
count: `multiprocessing.Manager.Value`
This will be used to exchange the number of frames
to be captured within the capture loop
recent_error: `multiprocessing.Manager.Value`
This will be used to report the most recent error that
        occurred during capture or some other process
interval: `multiprocessing.Manager.Value`
This will be used to exchange the number of seconds
to wait between successive frame captures
within the capture loop
"""
logger.info("INITIALIZING")
_capture.cam_lock = cam_lock
_capture.capture_event = capture_event
_capture.frames_captured = frames_captured
_capture.count = count
_capture.recent_error = recent_error
_capture.interval = interval
_capture.capture_die = capture_die
def _capture(cam, *args):
"""
Responsible for capturing images from the camera.
!!Do not call this method manually!!
.. note: Refer `_initialize()`
Parameters:
----------
cam: ICDA320 camera object
Camera object using which capture
        operations need to be performed
"""
multiprocessing.current_process().name = "IRCaptureLoop"
_capture.frames_captured.value = 0
try:
while not _capture.capture_die.get():
try:
_capture.capture_event.wait()
with _capture.cam_lock:
start_time = time.time()
if _capture.count.get() == -1:
fname = str(cam.capture(img_name=str(NORTH_LOC_STRING)+"-")) +\
".jpg"
cam.fetch(filename="", pattern="jpg")
_capture.frames_captured.value += 1
elif _capture.count.get() > 0:
fname = str(cam.capture(img_name=str(NORTH_LOC_STRING)+"-")) +\
".jpg"
cam.fetch(filename="", pattern="jpg")
# Increment frames captured count
_capture.frames_captured.value += 1
_capture.count.value -= 1
elif _capture.count.get() == 0:
_capture.capture_event.clear()
                # never pass a negative duration to sleep when capture + fetch
                # took longer than the requested interval
                time.sleep(max(0, _capture.interval.get() - (time.time() - start_time)))
except Exception as ex:
logger.error("Error in _capture process: "+str(ex))
_capture.recent_error.value = "Error in _capture process: "+str(ex)
continue
#_capture.capture_event.clear()
else:
cam.cleanup()
except KeyboardInterrupt as ki:
logger.info("Exiting from "+str(multiprocessing.current_process().name))
def camera_commands(cam, cam_lock, capture_event, frames_captured,
count, recent_error, interval, command_dict):
"""
Perform actions on the camera based on
the command dictionary
Parameters:
----------
cam: ICDA320 camera object
cam_lock: `multiprocessing.Lock`
For obtaining exclusive lock so that two
commands cannot be sent to the camera
simultaneously.
.. note: Camera's buffer overflows when it gets hit by
commands at more than 1Hz.
capture_event: `multiprocessing.Event`
This will be used to trigger the capture
start on the cam
frames_captured: `multiprocessing.Manager.Value`
This will be used to exchange the number of frames captured
within the capture loop
count: `multiprocessing.Manager.Value`
This will be used to exchange the number of frames
to be captured within the capture loop
recent_error: `multiprocessing.Manager.Value`
This will be used to report the most recent error that
        occurred during capture or some other process
interval: `multiprocessing.Manager.Value`
This will be used to exchange the number of seconds
to wait between successive frame captures
within the capture loop
command_dict: dictionary containing (k,v)
pairs for following keys:
capture: `bool`
            interval: `int`
            count: `int`
stop: `bool`
status: `bool`
focus: `int`
zoom: `int`
"""
def _current_status(msg="", **kwargs):
"""
This function will return the status
of the capture system
Parameters:
----------
msg: str, optional
If any custom message needs to be returned
"""
with cam_lock:
kwargs.update({
"capture": count.get(),
"interval": interval.get(),
"zoom": cam.zoom(),
"focus": cam.focus(),
"frames_captured": frames_captured.get(),
"msg": msg,
"recent_error": recent_error.get()
})
return kwargs
try:
if command_dict["stop"]:
# Stop capturing images
logger.info("Stopping current capture")
capture_event.clear()
if command_dict["status"]:
return _current_status()
if command_dict["zoom"] > 0:
cam.zoom(int(command_dict["zoom"]))
if command_dict["focus"]:
cam.focus(command_dict["focus"])
# Make sure before starting capture
# - any previous capture is not running
# - interval value is provided
if command_dict["capture"]:
if not capture_event.is_set():
if command_dict["interval"] > 0:
interval.value = command_dict["interval"]
frames_captured.value = 0
if command_dict["count"] > 0:
# Start capturing X images
count.value = command_dict["count"]
capture_event.set()
elif command_dict["count"] <= -1:
count.value = command_dict["count"]
capture_event.set()
else:
logger.warning("Cannot start capture without the interval field")
else:
logger.warning("Previous capture is already in progress")
return _current_status(msg="Previous capture is already in progress")
return _current_status()
except Exception as ex:
logger.warning("Couldn't execute following camera commands: "+str(ex)+\
"\n"+str(command_dict))
return _current_status(msg="Couldn't execute following camera commands: "+str(ex)+\
"\n"+str(command_dict))
finally:
# Reset the recent error after it has been sent once
recent_error.value = ""
def killChildProc(process, die):
"""
Kills child processes before terminating
due to some non-fatal (and non signal)
interrupt. e.g. ctrl c or an exception
"""
logger.warning("Killing: " + str(process))
die.value = True
time.sleep(2)
process.terminate()
process.join()
if __name__ == "__main__":
# Obtain the camera
logger.info("Obtaining Camera ... ")
north_cam = capture.ICDA320(tn_host=NORTH_IRCAM_IP,
tn_port=23,
ftp_host=NORTH_IRCAM_IP,
ftp_port=21,
ftp_username=NORTH_IRCAM_FTP_UNAME,
ftp_password=NORTH_IRCAM_FTP_PASS,
ir_image_dir=NORTH_IR_IMG_DIR)
# Manager responsible for exchanging messages with
# other process
mp_manager = multiprocessing.Manager()
# Setup events and shared Value
cam_lock = multiprocessing.Lock()
capture_event = mp_manager.Event()
recent_error = mp_manager.Value("recent_error", "")
frames_captured = mp_manager.Value('frames_captured', 0)
count = mp_manager.Value('count', 0)
interval = mp_manager.Value('interval', 0)
die = mp_manager.Value('die', False)
# Setup pool, initialize shared objects and start the process
logger.info("Starting camera capture process ... ")
_initialize(cam_lock, capture_event, frames_captured,
count, recent_error, interval, die)
process = multiprocessing.Process(target=_capture, args=(north_cam,))
process.start()
# graceful exit (for SIGINT & SIGQUIT)
atexit.register(killChildProc, process, die)
# RPC connection setup
logger.info("Setting up RPC connection")
credentials = pika.PlainCredentials(os.getenv("rpc_user"), os.getenv("rpc_pass"))
connection = pika.BlockingConnection(
pika.ConnectionParameters(os.getenv("rpc_server"), os.getenv("rpc_port"),
RPC_VHOST, credentials))
channel = connection.channel()
channel.queue_declare(queue=RPC_QUEUE_NAME)
def on_request(ch, method, props, body):
"""
Blocking Function for handling the incoming data
Refer "http://pika.readthedocs.io/en/0.11.2/modules/adapters/blocking.html"
"""
command_dict = json.loads(body)
logger.debug("Correlation id: " + str(props.correlation_id))
response = camera_commands(north_cam, cam_lock, capture_event,
frames_captured, count, recent_error,
interval, command_dict)
ch.basic_publish(exchange='',
routing_key=props.reply_to,
properties=pika.BasicProperties(correlation_id=props.correlation_id),
body=str(response))
ch.basic_ack(delivery_tag=method.delivery_tag)
try:
channel.basic_qos(prefetch_count=1)
channel.basic_consume(on_request, queue=RPC_QUEUE_NAME)
logger.info("Listening for RPC messages")
channel.start_consuming()
except KeyboardInterrupt as ki:
print()
logger.info("Exiting now")
except Exception as ex:
logger.critical("Critical Exception in main: "+str(ex))
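# Illustrative RPC client sketch (not part of the original module): how a remote caller might
# drive this service by publishing a JSON command to the RPC queue with a reply queue and a
# correlation id, using the same pika 0.x style API as above. The host argument, port fallback
# and command values are assumptions for illustration only.
def _example_send_rpc_command(host="localhost"):
    import uuid
    credentials = pika.PlainCredentials(os.getenv("rpc_user"), os.getenv("rpc_pass"))
    connection = pika.BlockingConnection(
        pika.ConnectionParameters(host, int(os.getenv("rpc_port", 5672)), RPC_VHOST, credentials))
    channel = connection.channel()
    # Exclusive auto-named queue on which on_request() will publish the JSON status reply.
    reply_queue = channel.queue_declare(exclusive=True).method.queue
    command = {"capture": True, "count": 10, "interval": 60,
               "stop": False, "status": False, "focus": 0, "zoom": 0}
    channel.basic_publish(exchange='',
                          routing_key=RPC_QUEUE_NAME,
                          properties=pika.BasicProperties(reply_to=reply_queue,
                                                          correlation_id=str(uuid.uuid4())),
                          body=json.dumps(command))
    connection.close()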
|
optimization.py
|
import hashlib
import json
import six
from copy import copy
from datetime import datetime
from itertools import product
from logging import getLogger
from threading import Thread, Event
from time import time
from typing import List, Set, Union, Any, Sequence, Optional, Mapping, Callable
from .job import TrainsJob
from .parameters import Parameter
from ..backend_interface.util import get_or_create_project
from ..logger import Logger
from ..backend_api.services import workers as workers_service, tasks as tasks_service, events as events_service
from ..task import Task
logger = getLogger('clearml.automation.optimization')
class Objective(object):
"""
Optimization ``Objective`` class to maximize / minimize over all experiments. This class will sample a specific
    scalar from all experiments, and maximize / minimize over a single scalar (i.e., title and series combination).
``SearchStrategy`` and ``HyperParameterOptimizer`` use ``Objective`` in the strategy search algorithm.
"""
def __init__(self, title, series, order='max', extremum=False):
# type: (str, str, str, bool) -> ()
"""
Construct ``Objective`` object that will return the scalar value for a specific task ID.
:param str title: The scalar graph title to sample from.
:param str series: The scalar series title to sample from.
:param str order: The setting for maximizing or minimizing the objective scalar value.
The values are:
- ``max``
- ``min``
:param bool extremum: Return the global minimum / maximum reported metric value
The values are:
- ``True`` - Return the global minimum / maximum reported metric value.
- ``False`` - Return the last value reported for a specific Task. (Default)
"""
self.title = title
self.series = series
assert order in ('min', 'max',)
# normalize value so we always look for the highest objective value
self.sign = -1 if (isinstance(order, str) and order.lower().strip() == 'min') else +1
self._metric = None
self.extremum = extremum
def get_objective(self, task_id):
# type: (Union[str, Task, TrainsJob]) -> Optional[float]
"""
Return a specific task scalar value based on the objective settings (title/series).
:param str task_id: The Task id to retrieve scalar from (or ``TrainsJob`` object).
:return: The scalar value.
"""
# create self._metric
self._get_last_metrics_encode_field()
if isinstance(task_id, Task):
task_id = task_id.id
elif isinstance(task_id, TrainsJob):
task_id = task_id.task_id()
        # noinspection PyBroadException
try:
# noinspection PyProtectedMember
task = Task._query_tasks(
task_ids=[task_id], only_fields=['last_metrics.{}.{}'.format(self._metric[0], self._metric[1])])[0]
except Exception:
return None
metrics = task.last_metrics
if not metrics:
return None
# noinspection PyBroadException
try:
values = metrics[self._metric[0]][self._metric[1]]
if not self.extremum:
return values['value']
return values['min_value'] if self.sign < 0 else values['max_value']
except Exception:
return None
def get_current_raw_objective(self, task):
# type: (Union[TrainsJob, Task]) -> (int, float)
"""
Return the current raw value (without sign normalization) of the objective.
:param str task: The Task or Job to retrieve scalar from (or ``TrainsJob`` object).
:return: Tuple(iteration, value) if, and only if, the metric exists. None if the metric does not exist.
"""
if isinstance(task, Task):
task_id = task.id
elif isinstance(task, TrainsJob):
task_id = task.task_id()
else:
task_id = task
if not task_id:
raise ValueError("Task ID not provided")
# send request
# noinspection PyBroadException
try:
# noinspection PyProtectedMember
res = Task._get_default_session().send(
events_service.ScalarMetricsIterHistogramRequest(
task=task_id, key='iter', samples=None),
)
except Exception:
res = None
if not res:
return None
response = res.wait()
if not response.ok() or not response.response_data:
return None
scalars = response.response_data
# noinspection PyBroadException
try:
return scalars[self.title][self.series]['x'][-1], scalars[self.title][self.series]['y'][-1]
except Exception:
return None
def get_objective_sign(self):
# type: () -> float
"""
Return the sign of the objective.
- ``+1`` - If maximizing
- ``-1`` - If minimizing
:return: Objective function sign.
"""
return self.sign
def get_objective_metric(self):
# type: () -> (str, str)
"""
Return the metric title, series pair of the objective.
:return: (title, series)
"""
return self.title, self.series
def get_normalized_objective(self, task_id):
# type: (Union[str, Task, TrainsJob]) -> Optional[float]
"""
Return a normalized task scalar value based on the objective settings (title/series).
I.e. objective is always to maximize the returned value
:param str task_id: The Task id to retrieve scalar from.
:return: Normalized scalar value.
"""
objective = self.get_objective(task_id=task_id)
if objective is None:
return None
# normalize value so we always look for the highest objective value
return self.sign * objective
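    # Illustrative note (not part of the original module): with order='min' the sign is -1,
    # so raw losses such as [0.50, 0.30, 0.70] normalize to [-0.50, -0.30, -0.70]; taking the
    # maximum normalized value (-0.30) still selects the run with the lowest raw loss.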
def get_top_tasks(self, top_k, optimizer_task_id=None):
# type: (int, Optional[str]) -> Sequence[Task]
"""
Return a list of Tasks of the top performing experiments, based on the title/series objective.
:param int top_k: The number of Tasks (experiments) to return.
:param str optimizer_task_id: Parent optimizer Task ID
:return: A list of Task objects, ordered by performance, where index 0 is the best performing Task.
"""
task_filter = {'page_size': int(top_k), 'page': 0}
if optimizer_task_id:
task_filter['parent'] = optimizer_task_id
order_by = self._get_last_metrics_encode_field()
if order_by and (order_by.startswith('last_metrics') or order_by.startswith('-last_metrics')):
parts = order_by.split('.')
if parts[-1] in ('min', 'max', 'last'):
title = hashlib.md5(str(parts[1]).encode('utf-8')).hexdigest()
series = hashlib.md5(str(parts[2]).encode('utf-8')).hexdigest()
minmax = 'min_value' if 'min' in parts[3] else ('max_value' if 'max' in parts[3] else 'value')
                order_by = '{}last_metrics.{}.{}.{}'.format(
                    '-' if order_by and order_by[0] == '-' else '', title, series, minmax)
if order_by:
task_filter['order_by'] = [order_by]
return Task.get_tasks(task_filter=task_filter)
def _get_last_metrics_encode_field(self):
# type: () -> str
"""
Return encoded representation of the title/series metric.
:return: The objective title/series.
"""
if not self._metric:
title = hashlib.md5(str(self.title).encode('utf-8')).hexdigest()
series = hashlib.md5(str(self.series).encode('utf-8')).hexdigest()
self._metric = title, series
return '{}last_metrics.{}.{}.{}'.format(
'-' if self.sign > 0 else '', self._metric[0], self._metric[1],
('min_value' if self.sign < 0 else 'max_value') if self.extremum else 'value')
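# Illustrative sketch (not part of the original module): the encoded sort field built above
# has the form "-last_metrics.<md5(title)>.<md5(series)>.<value|min_value|max_value>", which
# get_top_tasks() then passes to the backend as order_by. The title/series values are examples.
def _example_encoded_order_by(title='validation', series='loss'):
    encoded_title = hashlib.md5(str(title).encode('utf-8')).hexdigest()
    encoded_series = hashlib.md5(str(series).encode('utf-8')).hexdigest()
    # A maximization objective over the last reported value sorts descending on "value".
    return '-last_metrics.{}.{}.{}'.format(encoded_title, encoded_series, 'value')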
class Budget(object):
class Field(object):
def __init__(self, limit=None):
# type: (Optional[float]) -> ()
self.limit = limit
self.current = {}
def update(self, uid, value):
# type: (Union[str, int], float) -> ()
if value is not None:
try:
self.current[uid] = float(value)
except (TypeError, ValueError):
pass
@property
def used(self):
# type: () -> (Optional[float])
if self.limit is None or not self.current:
return None
return sum(self.current.values())/float(self.limit)
def __init__(self, jobs_limit, iterations_limit, compute_time_limit):
# type: (Optional[int], Optional[int], Optional[float]) -> ()
self.jobs = self.Field(jobs_limit)
self.iterations = self.Field(iterations_limit)
self.compute_time = self.Field(compute_time_limit)
def to_dict(self):
# type: () -> (Mapping[str, Mapping[str, float]])
# returned dict is Mapping[Union['jobs', 'iterations', 'compute_time'], Mapping[Union['limit', 'used'], float]]
current_budget = {}
jobs = self.jobs.used
current_budget['jobs'] = {'limit': self.jobs.limit, 'used': jobs if jobs else 0}
iterations = self.iterations.used
current_budget['iterations'] = {'limit': self.iterations.limit, 'used': iterations if iterations else 0}
compute_time = self.compute_time.used
current_budget['compute_time'] = {'limit': self.compute_time.limit, 'used': compute_time if compute_time else 0}
return current_budget
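# Illustrative sketch (not part of the original module): Budget keeps per-job usage and
# to_dict() reports the consumed fraction of each configured limit. The numbers below are
# made up for illustration.
def _example_budget_accounting():
    budget = Budget(jobs_limit=10, iterations_limit=1000, compute_time_limit=60.0)
    budget.jobs.update('job-1', 1)             # one job launched
    budget.iterations.update('job-1', 250)     # 250 objective iterations reported
    budget.compute_time.update('job-1', 15.0)  # 15 minutes of compute used
    # -> {'jobs': {'limit': 10, 'used': 0.1},
    #     'iterations': {'limit': 1000, 'used': 0.25},
    #     'compute_time': {'limit': 60.0, 'used': 0.25}}
    return budget.to_dict()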
class SearchStrategy(object):
"""
The base search strategy class. Inherit this class to implement your custom strategy.
"""
_tag = 'optimization'
_job_class = TrainsJob # type: TrainsJob
def __init__(
self,
base_task_id, # type: str
hyper_parameters, # type: Sequence[Parameter]
objective_metric, # type: Objective
execution_queue, # type: str
num_concurrent_workers, # type: int
pool_period_min=2., # type: float
time_limit_per_job=None, # type: Optional[float]
compute_time_limit=None, # type: Optional[float]
min_iteration_per_job=None, # type: Optional[int]
max_iteration_per_job=None, # type: Optional[int]
total_max_jobs=None, # type: Optional[int]
**_ # type: Any
):
# type: (...) -> ()
"""
Initialize a search strategy optimizer.
:param str base_task_id: The Task ID (str)
:param list hyper_parameters: The list of parameter objects to optimize over.
:param Objective objective_metric: The Objective metric to maximize / minimize.
:param str execution_queue: The execution queue to use for launching Tasks (experiments).
:param int num_concurrent_workers: The maximum number of concurrent running machines.
:param float pool_period_min: The time between two consecutive pools (minutes).
:param float time_limit_per_job: The maximum execution time per single job in minutes. When time limit is
exceeded, the job is aborted. (Optional)
:param float compute_time_limit: The maximum compute time in minutes. When time limit is exceeded,
            all jobs are aborted. (Optional)
:param int min_iteration_per_job: The minimum iterations (of the Objective metric) per single job (Optional)
:param int max_iteration_per_job: The maximum iterations (of the Objective metric) per single job.
When maximum iterations is exceeded, the job is aborted. (Optional)
:param int total_max_jobs: The total maximum jobs for the optimization process. The default value is ``None``,
for unlimited.
"""
super(SearchStrategy, self).__init__()
self._base_task_id = base_task_id
self._hyper_parameters = hyper_parameters
self._objective_metric = objective_metric
self._execution_queue = execution_queue
self._num_concurrent_workers = num_concurrent_workers
self.pool_period_minutes = pool_period_min
self.time_limit_per_job = time_limit_per_job
self.compute_time_limit = compute_time_limit
self.max_iteration_per_job = max_iteration_per_job
self.min_iteration_per_job = min_iteration_per_job
self.total_max_jobs = total_max_jobs
self._stop_event = Event()
self._current_jobs = []
self._pending_jobs = []
self._num_jobs = 0
self._job_parent_id = None
self._job_project_id = None
self._created_jobs_ids = {}
self._naming_function = None
self._job_project = {}
self.budget = Budget(
jobs_limit=self.total_max_jobs,
compute_time_limit=self.compute_time_limit if self.compute_time_limit else None,
iterations_limit=self.total_max_jobs * self.max_iteration_per_job if
self.max_iteration_per_job and self.total_max_jobs else None
)
self._validate_base_task()
self._optimizer_task = None
def start(self):
# type: () -> ()
"""
Start the Optimizer controller function loop(). If the calling process is stopped, the controller will stop
as well.
.. important::
This function returns only after the optimization is completed or :meth:`stop` was called.
"""
counter = 0
while True:
logger.debug('optimization loop #{}'.format(counter))
if not self.process_step():
break
if self._stop_event.wait(timeout=self.pool_period_minutes * 60.):
break
counter += 1
def stop(self):
# type: () -> ()
"""
Stop the current running optimization loop. Called from a different thread than the :meth:`start`.
"""
self._stop_event.set()
def process_step(self):
# type: () -> bool
"""
        Abstract helper function. Implementation is not required. Used by the default :meth:`start` implementation.
Main optimization loop, called from the daemon thread created by :meth:`start`.
- Call monitor job on every ``TrainsJob`` in jobs:
- Check the performance or elapsed time, and then decide whether to kill the jobs.
- Call create_job:
          - Check if spare job slots exist, and if they do, call :meth:`create_job` to create a new job based on previously tested experiments.
:return: True, if continue the optimization. False, if immediately stop.
"""
updated_jobs = []
for job in self._current_jobs:
if self.monitor_job(job):
updated_jobs.append(job)
self._current_jobs = updated_jobs
pending_jobs = []
for job in self._pending_jobs:
if job.is_pending():
pending_jobs.append(job)
else:
self.budget.jobs.update(job.task_id(), 1)
self._pending_jobs = pending_jobs
free_workers = self._num_concurrent_workers - len(self._current_jobs)
# do not create more jobs if we hit the limit
if self.total_max_jobs and self._num_jobs >= self.total_max_jobs:
return bool(self._current_jobs)
# see how many free slots we have and create job
for i in range(max(0, free_workers)):
new_job = self.create_job()
if not new_job:
break
self._num_jobs += 1
new_job.launch(self._execution_queue)
self._current_jobs.append(new_job)
self._pending_jobs.append(new_job)
return bool(self._current_jobs)
def create_job(self):
# type: () -> Optional[TrainsJob]
"""
        Abstract helper function. Implementation is not required. Used by the default :meth:`process_step` implementation.
Create a new job if needed. return the newly created job. If no job needs to be created, return ``None``.
:return: A Newly created TrainsJob object, or None if no TrainsJob created.
"""
return None
def monitor_job(self, job):
# type: (TrainsJob) -> bool
"""
        Helper function. Implementation is not required. Used by the default :meth:`process_step` implementation.
Check if the job needs to be aborted or already completed.
If returns ``False``, the job was aborted / completed, and should be taken off the current job list
If there is a budget limitation, this call should update
``self.budget.compute_time.update`` / ``self.budget.iterations.update``
:param TrainsJob job: A ``TrainsJob`` object to monitor.
:return: False, if the job is no longer relevant.
"""
abort_job = self.update_budget_per_job(job)
if abort_job:
job.abort()
return False
return not job.is_stopped()
def update_budget_per_job(self, job):
abort_job = False
if self.time_limit_per_job:
elapsed = job.elapsed() / 60.
if elapsed > 0:
self.budget.compute_time.update(job.task_id(), elapsed)
if elapsed > self.time_limit_per_job:
abort_job = True
if self.compute_time_limit:
if not self.time_limit_per_job:
elapsed = job.elapsed() / 60.
if elapsed > 0:
self.budget.compute_time.update(job.task_id(), elapsed)
if self.max_iteration_per_job:
iterations = self._get_job_iterations(job)
if iterations > 0:
self.budget.iterations.update(job.task_id(), iterations)
if iterations > self.max_iteration_per_job:
abort_job = True
return abort_job
def get_running_jobs(self):
# type: () -> Sequence[TrainsJob]
"""
Return the current running TrainsJobs.
:return: List of TrainsJob objects.
"""
return self._current_jobs
def get_created_jobs_ids(self):
# type: () -> Mapping[str, dict]
"""
Return a Task IDs dict created by this optimizer until now, including completed and running jobs.
The values of the returned dict are the parameters used in the specific job
:return: dict of task IDs (str) as keys, and their parameters dict as values.
"""
return {job_id: job_val[1] for job_id, job_val in self._created_jobs_ids.items()}
def get_created_jobs_tasks(self):
# type: () -> Mapping[str, dict]
"""
Return a Task IDs dict created by this optimizer until now.
The values of the returned dict are the TrainsJob.
:return: dict of task IDs (str) as keys, and their TrainsJob as values.
"""
return {job_id: job_val[0] for job_id, job_val in self._created_jobs_ids.items()}
def get_top_experiments(self, top_k):
# type: (int) -> Sequence[Task]
"""
Return a list of Tasks of the top performing experiments, based on the controller ``Objective`` object.
:param int top_k: The number of Tasks (experiments) to return.
:return: A list of Task objects, ordered by performance, where index 0 is the best performing Task.
"""
# noinspection PyProtectedMember
top_tasks = self._get_child_tasks(
parent_task_id=self._job_parent_id or self._base_task_id,
order_by=self._objective_metric._get_last_metrics_encode_field(),
additional_filters={'page_size': int(top_k), 'page': 0})
return top_tasks
def get_objective_metric(self):
# type: () -> (str, str)
"""
Return the metric title, series pair of the objective.
:return: (title, series)
"""
return self._objective_metric.get_objective_metric()
def helper_create_job(
self,
base_task_id, # type: str
parameter_override=None, # type: Optional[Mapping[str, str]]
task_overrides=None, # type: Optional[Mapping[str, str]]
tags=None, # type: Optional[Sequence[str]]
parent=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> TrainsJob
"""
Create a Job using the specified arguments, ``TrainsJob`` for details.
:return: A newly created Job instance.
"""
if parameter_override:
param_str = ['{}={}'.format(k, parameter_override[k]) for k in sorted(parameter_override.keys())]
if self._naming_function:
name = self._naming_function(self._base_task_name, parameter_override)
elif self._naming_function is False:
name = None
else:
name = '{}: {}'.format(self._base_task_name, ' '.join(param_str))
comment = '\n'.join(param_str)
else:
name = None
comment = None
tags = (tags or []) + [self._tag, 'opt' + (': {}'.format(self._job_parent_id) if self._job_parent_id else '')]
new_job = self._job_class(
base_task_id=base_task_id, parameter_override=parameter_override,
task_overrides=task_overrides, tags=tags, parent=parent or self._job_parent_id,
name=name, comment=comment,
project=self._job_project_id or self._get_task_project(parent or self._job_parent_id),
**kwargs)
self._created_jobs_ids[new_job.task_id()] = (new_job, parameter_override)
logger.info('Creating new Task: {}'.format(parameter_override))
return new_job
def set_job_class(self, job_class):
# type: (TrainsJob) -> ()
"""
Set the class to use for the :meth:`helper_create_job` function.
:param TrainsJob job_class: The Job Class type.
"""
self._job_class = job_class
def set_job_default_parent(self, job_parent_task_id, project_name=None):
# type: (Optional[str], Optional[str]) -> ()
"""
Set the default parent for all Jobs created by the :meth:`helper_create_job` method.
:param str job_parent_task_id: The parent Task ID.
:param str project_name: If specified, create the jobs in the specified project
"""
self._job_parent_id = job_parent_task_id
# noinspection PyProtectedMember
self._job_project_id = get_or_create_project(
session=Task._get_default_session(), project_name=project_name, description='HPO process spawned Tasks') \
if project_name else None
def set_job_naming_scheme(self, naming_function):
# type: (Optional[Callable[[str, dict], str]]) -> ()
"""
Set the function used to name a newly created job.
:param callable naming_function:
.. code-block:: py
naming_functor(base_task_name, argument_dict) -> str
"""
self._naming_function = naming_function
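    # Illustrative example (not part of the original module): a naming function receives the
    # base task name and the parameter override dict and returns the new Task name, e.g.
    #
    #   def name_job(base_task_name, params):
    #       return '{} (lr={})'.format(base_task_name, params.get('lr'))
    #
    #   strategy.set_job_naming_scheme(name_job)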
def set_optimizer_task(self, task):
# type: (Task) -> ()
"""
Set the optimizer task object to be used to store/generate reports on the optimization process.
Usually this is the current task of this process.
:param Task task: The optimizer`s current Task.
"""
self._optimizer_task = task
def _validate_base_task(self):
# type: () -> ()
"""
Check the base task exists and contains the requested Objective metric and hyper parameters.
"""
# check if the task exists
try:
task = Task.get_task(task_id=self._base_task_id)
self._base_task_name = task.name
except ValueError:
raise ValueError("Could not find base task id {}".format(self._base_task_id))
# check if the hyper-parameters exist:
task_parameters = task.get_parameters(backwards_compatibility=False)
missing_params = [h.name for h in self._hyper_parameters if h.name not in task_parameters]
if missing_params:
logger.warning('Could not find requested hyper-parameters {} on base task {}'.format(
missing_params, self._base_task_id))
# check if the objective metric exists (i.e. no typos etc)
if self._objective_metric.get_objective(self._base_task_id) is None:
logger.warning('Could not find requested metric {} report on base task {}'.format(
self._objective_metric.get_objective_metric(), self._base_task_id))
def _get_task_project(self, parent_task_id):
# type: (str) -> (Optional[str])
if not parent_task_id:
return
if parent_task_id not in self._job_project:
task = Task.get_task(task_id=parent_task_id)
self._job_project[parent_task_id] = task.project
return self._job_project.get(parent_task_id)
def _get_job_iterations(self, job):
# type: (Union[TrainsJob, Task]) -> int
iteration_value = self._objective_metric.get_current_raw_objective(job)
return iteration_value[0] if iteration_value else -1
@classmethod
def _get_child_tasks_ids(
cls,
parent_task_id, # type: str
            status=None,  # type: Optional[Union[Task.TaskStatusEnum, Sequence[Task.TaskStatusEnum]]]
order_by=None, # type: Optional[str]
additional_filters=None # type: Optional[dict]
):
# type: (...) -> (Sequence[str])
"""
        Helper function. Return a list of Task IDs tagged automl, with specific ``status``, ordered by ``sort_field``.
:param str parent_task_id: The base Task ID (parent).
:param status: The current status of requested tasks (for example, ``in_progress`` and ``completed``).
:param str order_by: The field name to sort results.
Examples:
.. code-block:: py
"-last_metrics.title.series.min"
"last_metrics.title.series.max"
"last_metrics.title.series.last"
"execution.parameters.name"
"updated"
:param dict additional_filters: The additional task filters.
:return: A list of Task IDs (str)
"""
task_filter = {
'parent': parent_task_id,
# 'tags': [cls._tag],
# since we have auto archive we do not want to filter out archived tasks
# 'system_tags': ['-archived'],
}
task_filter.update(additional_filters or {})
if status:
task_filter['status'] = status if isinstance(status, (tuple, list)) else [status]
if order_by and (order_by.startswith('last_metrics') or order_by.startswith('-last_metrics')):
parts = order_by.split('.')
if parts[-1] in ('min', 'max', 'last'):
title = hashlib.md5(str(parts[1]).encode('utf-8')).hexdigest()
series = hashlib.md5(str(parts[2]).encode('utf-8')).hexdigest()
minmax = 'min_value' if 'min' in parts[3] else ('max_value' if 'max' in parts[3] else 'value')
                order_by = '{}last_metrics.{}.{}.{}'.format(
                    '-' if order_by and order_by[0] == '-' else '', title, series, minmax)
if order_by:
task_filter['order_by'] = [order_by]
# noinspection PyProtectedMember
task_objects = Task._query_tasks(**task_filter)
return [t.id for t in task_objects]
@classmethod
def _get_child_tasks(
cls,
parent_task_id, # type: str
            status=None,  # type: Optional[Union[Task.TaskStatusEnum, Sequence[Task.TaskStatusEnum]]]
order_by=None, # type: Optional[str]
additional_filters=None # type: Optional[dict]
):
# type: (...) -> (Sequence[Task])
"""
Helper function. Return a list of tasks tagged automl, with specific ``status``, ordered by ``sort_field``.
:param str parent_task_id: The base Task ID (parent).
:param status: The current status of requested tasks (for example, ``in_progress`` and ``completed``).
:param str order_by: The field name to sort results.
Examples:
.. code-block:: py
"-last_metrics.title.series.min"
"last_metrics.title.series.max"
"last_metrics.title.series.last"
"execution.parameters.name"
"updated"
:param dict additional_filters: The additional task filters.
:return: A list of Task objects
"""
return [
Task.get_task(task_id=t_id) for t_id in cls._get_child_tasks_ids(
parent_task_id=parent_task_id,
status=status,
order_by=order_by,
additional_filters=additional_filters)
]
class GridSearch(SearchStrategy):
"""
Grid search strategy controller. Full grid sampling of every hyper-parameter combination.
"""
def __init__(
self,
base_task_id, # type: str
hyper_parameters, # type: Sequence[Parameter]
objective_metric, # type: Objective
execution_queue, # type: str
num_concurrent_workers, # type: int
pool_period_min=2., # type: float
time_limit_per_job=None, # type: Optional[float]
compute_time_limit=None, # type: Optional[float]
max_iteration_per_job=None, # type: Optional[int]
total_max_jobs=None, # type: Optional[int]
**_ # type: Any
):
# type: (...) -> ()
"""
Initialize a grid search optimizer
:param str base_task_id: The Task ID.
:param list hyper_parameters: The list of parameter objects to optimize over.
:param Objective objective_metric: The Objective metric to maximize / minimize.
:param str execution_queue: The execution queue to use for launching Tasks (experiments).
:param int num_concurrent_workers: The maximum number of concurrent running machines.
:param float pool_period_min: The time between two consecutive pools (minutes).
:param float time_limit_per_job: The maximum execution time per single job in minutes. When the time limit is
            exceeded, the job is aborted. (Optional)
:param float compute_time_limit: The maximum compute time in minutes. When time limit is exceeded,
            all jobs are aborted. (Optional)
:param int max_iteration_per_job: The maximum iterations (of the Objective metric)
per single job, When exceeded, the job is aborted.
:param int total_max_jobs: The total maximum jobs for the optimization process. The default is ``None``, for
unlimited.
"""
super(GridSearch, self).__init__(
base_task_id=base_task_id, hyper_parameters=hyper_parameters, objective_metric=objective_metric,
execution_queue=execution_queue, num_concurrent_workers=num_concurrent_workers,
pool_period_min=pool_period_min, time_limit_per_job=time_limit_per_job,
compute_time_limit=compute_time_limit, max_iteration_per_job=max_iteration_per_job,
total_max_jobs=total_max_jobs, **_)
self._param_iterator = None
def create_job(self):
# type: () -> Optional[TrainsJob]
"""
Create a new job if needed. Return the newly created job. If no job needs to be created, return ``None``.
:return: A newly created TrainsJob object, or None if no TrainsJob is created.
"""
try:
parameters = self._next_configuration()
except StopIteration:
return None
return self.helper_create_job(base_task_id=self._base_task_id, parameter_override=parameters)
def _next_configuration(self):
# type: () -> Mapping[str, str]
def param_iterator_fn():
hyper_params_values = [p.to_list() for p in self._hyper_parameters]
for state in product(*hyper_params_values):
yield dict(kv for d in state for kv in d.items())
if not self._param_iterator:
self._param_iterator = param_iterator_fn()
return next(self._param_iterator)
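# Illustrative sketch (not part of the original module): the grid is the cartesian product of
# every hyper-parameter's value list, mirroring _next_configuration() above and assuming each
# Parameter.to_list() yields single-key {name: value} dicts. The parameter names are examples.
def _example_grid_enumeration():
    lr_values = [{'lr': 0.01}, {'lr': 0.1}]
    batch_values = [{'batch_size': 32}, {'batch_size': 64}]
    grid = [dict(kv for d in state for kv in d.items())
            for state in product(lr_values, batch_values)]
    # -> [{'lr': 0.01, 'batch_size': 32}, {'lr': 0.01, 'batch_size': 64},
    #     {'lr': 0.1, 'batch_size': 32}, {'lr': 0.1, 'batch_size': 64}]
    return grid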
class RandomSearch(SearchStrategy):
"""
Random search strategy controller. Random uniform sampling of hyper-parameters.
"""
# Number of already chosen random samples before assuming we covered the entire hyper-parameter space
_hp_space_cover_samples = 42
def __init__(
self,
base_task_id, # type: str
hyper_parameters, # type: Sequence[Parameter]
objective_metric, # type: Objective
execution_queue, # type: str
num_concurrent_workers, # type: int
pool_period_min=2., # type: float
time_limit_per_job=None, # type: Optional[float]
compute_time_limit=None, # type: Optional[float]
max_iteration_per_job=None, # type: Optional[int]
total_max_jobs=None, # type: Optional[int]
**_ # type: Any
):
# type: (...) -> ()
"""
Initialize a random search optimizer.
:param str base_task_id: The Task ID.
:param list hyper_parameters: The list of Parameter objects to optimize over.
:param Objective objective_metric: The Objective metric to maximize / minimize.
:param str execution_queue: The execution queue to use for launching Tasks (experiments).
        :param int num_concurrent_workers: The maximum number of concurrent running machines.
:param float pool_period_min: The time between two consecutive pools (minutes).
:param float time_limit_per_job: The maximum execution time per single job in minutes,
            when the time limit is exceeded, the job is aborted. (Optional)
:param float compute_time_limit: The maximum compute time in minutes. When time limit is exceeded,
            all jobs are aborted. (Optional)
:param int max_iteration_per_job: The maximum iterations (of the Objective metric)
per single job. When exceeded, the job is aborted.
:param int total_max_jobs: The total maximum jobs for the optimization process. The default is ``None``, for
unlimited.
"""
super(RandomSearch, self).__init__(
base_task_id=base_task_id, hyper_parameters=hyper_parameters, objective_metric=objective_metric,
execution_queue=execution_queue, num_concurrent_workers=num_concurrent_workers,
pool_period_min=pool_period_min, time_limit_per_job=time_limit_per_job,
compute_time_limit=compute_time_limit, max_iteration_per_job=max_iteration_per_job,
total_max_jobs=total_max_jobs, **_)
self._hyper_parameters_collection = set()
def create_job(self):
# type: () -> Optional[TrainsJob]
"""
Create a new job if needed. Return the newly created job. If no job needs to be created, return ``None``.
:return: A newly created TrainsJob object, or None if no TrainsJob created
"""
parameters = None
        # maximum tries to get a random set that is not already in the collection
for i in range(self._hp_space_cover_samples):
parameters = {}
for p in self._hyper_parameters:
parameters.update(p.get_value())
# hash the parameters dictionary
param_hash = hash(json.dumps(parameters, sort_keys=True))
# if this is a new set of parameters, use it.
if param_hash not in self._hyper_parameters_collection:
self._hyper_parameters_collection.add(param_hash)
break
# try again
parameters = None
# if we failed to find a random set of parameters, assume we selected all of them
if not parameters:
return None
return self.helper_create_job(base_task_id=self._base_task_id, parameter_override=parameters)
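# Illustrative sketch (not part of the original module): RandomSearch de-duplicates sampled
# configurations by hashing a canonical JSON form of the parameter dict, exactly as
# create_job() does above. The helper name is hypothetical.
def _example_parameter_dedup(seen_hashes, parameters):
    param_hash = hash(json.dumps(parameters, sort_keys=True))
    if param_hash in seen_hashes:
        return False  # this configuration was already tried
    seen_hashes.add(param_hash)
    return True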
class HyperParameterOptimizer(object):
"""
Hyper-parameter search controller. Clones the base experiment, changes arguments and tries to maximize/minimize
the defined objective.
"""
_tag = 'optimization'
def __init__(
self,
base_task_id, # type: str
hyper_parameters, # type: Sequence[Parameter]
objective_metric_title, # type: str
objective_metric_series, # type: str
objective_metric_sign='min', # type: str
optimizer_class=RandomSearch, # type: type(SearchStrategy)
max_number_of_concurrent_tasks=10, # type: int
execution_queue='default', # type: str
optimization_time_limit=None, # type: Optional[float]
compute_time_limit=None, # type: Optional[float]
auto_connect_task=True, # type: Union[bool, Task]
always_create_task=False, # type: bool
spawn_project=None, # type: Optional[str]
save_top_k_tasks_only=None, # type: Optional[int]
**optimizer_kwargs # type: Any
):
# type: (...) -> ()
"""
Create a new hyper-parameter controller. The newly created object will launch and monitor the new experiments.
:param str base_task_id: The Task ID to be used as template experiment to optimize.
:param list hyper_parameters: The list of Parameter objects to optimize over.
:param str objective_metric_title: The Objective metric title to maximize / minimize (for example,
``validation``).
:param str objective_metric_series: The Objective metric series to maximize / minimize (for example, ``loss``).
:param str objective_metric_sign: The objective to maximize / minimize.
The values are:
- ``min`` - Minimize the last reported value for the specified title/series scalar.
- ``max`` - Maximize the last reported value for the specified title/series scalar.
- ``min_global`` - Minimize the min value of *all* reported values for the specific title/series scalar.
- ``max_global`` - Maximize the max value of *all* reported values for the specific title/series scalar.
:param class.SearchStrategy optimizer_class: The SearchStrategy optimizer to use for the hyper-parameter search
:param int max_number_of_concurrent_tasks: The maximum number of concurrent Tasks (experiments) running at the
same time.
:param str execution_queue: The execution queue to use for launching Tasks (experiments).
:param float optimization_time_limit: The maximum time (minutes) for the entire optimization process. The
default is ``None``, indicating no time limit.
:param float compute_time_limit: The maximum compute time in minutes. When time limit is exceeded,
            all jobs are aborted. (Optional)
:param bool auto_connect_task: Store optimization arguments and configuration in the Task
The values are:
- ``True`` - The optimization argument and configuration will be stored in the Task. All arguments will
be under the hyper-parameter section ``opt``, and the optimization hyper_parameters space will
              be stored in the Task configuration object section.
- ``False`` - Do not store with Task.
- ``Task`` - A specific Task object to connect the optimization process with.
:param bool always_create_task: Always create a new Task
The values are:
- ``True`` - No current Task initialized. Create a new task named ``optimization`` in the ``base_task_id``
project.
- ``False`` - Use the :py:meth:`task.Task.current_task` (if exists) to report statistics.
:param str spawn_project: If project name is specified, create all optimization Jobs (Tasks) in the
specified project instead of the original base_task_id project.
:param int save_top_k_tasks_only: If specified and above 0, keep only the top_k performing Tasks,
and archive the rest of the created Tasks. Default: -1 keep everything, nothing will be archived.
:param ** optimizer_kwargs: Arguments passed directly to the optimizer constructor.
Example:
.. code-block:: py
:linenos:
:caption: Example
from clearml import Task
from clearml.automation import UniformParameterRange, DiscreteParameterRange
from clearml.automation import GridSearch, RandomSearch, HyperParameterOptimizer
task = Task.init('examples', 'HyperParameterOptimizer example')
an_optimizer = HyperParameterOptimizer(
base_task_id='fa30fa45d95d4927b87c323b5b04dc44',
hyper_parameters=[
UniformParameterRange('lr', min_value=0.01, max_value=0.3, step_size=0.05),
DiscreteParameterRange('network', values=['ResNet18', 'ResNet50', 'ResNet101']),
],
objective_metric_title='title',
objective_metric_series='series',
objective_metric_sign='min',
max_number_of_concurrent_tasks=5,
optimizer_class=RandomSearch,
execution_queue='workers', time_limit_per_job=120, pool_period_min=0.2)
# This will automatically create and print the optimizer new task id
# for later use. if a Task was already created, it will use it.
an_optimizer.set_time_limit(in_minutes=10.)
an_optimizer.start()
# we can create a pooling loop if we like
while not an_optimizer.reached_time_limit():
top_exp = an_optimizer.get_top_experiments(top_k=3)
print(top_exp)
# wait until optimization completed or timed-out
an_optimizer.wait()
# make sure we stop all jobs
an_optimizer.stop()
"""
# create a new Task, if we do not have one already
self._task = auto_connect_task if isinstance(auto_connect_task, Task) else Task.current_task()
if not self._task and always_create_task:
base_task = Task.get_task(task_id=base_task_id)
self._task = Task.init(
project_name=base_task.get_project_name(),
task_name='Optimizing: {}'.format(base_task.name),
task_type=Task.TaskTypes.optimizer,
)
opts = dict(
base_task_id=base_task_id,
objective_metric_title=objective_metric_title,
objective_metric_series=objective_metric_series,
objective_metric_sign=objective_metric_sign,
max_number_of_concurrent_tasks=max_number_of_concurrent_tasks,
execution_queue=execution_queue,
optimization_time_limit=optimization_time_limit,
compute_time_limit=compute_time_limit,
optimizer_kwargs=optimizer_kwargs)
# make sure all the created tasks are our children, as we are creating them
if self._task:
self._task.add_tags([self._tag])
if auto_connect_task:
optimizer_class, hyper_parameters, opts = self._connect_args(
optimizer_class=optimizer_class, hyper_param_configuration=hyper_parameters, **opts)
self.base_task_id = opts['base_task_id']
self.hyper_parameters = hyper_parameters
self.max_number_of_concurrent_tasks = opts['max_number_of_concurrent_tasks']
self.execution_queue = opts['execution_queue']
self.objective_metric = Objective(
title=opts['objective_metric_title'], series=opts['objective_metric_series'],
order='min' if opts['objective_metric_sign'] in ('min', 'min_global') else 'max',
extremum=opts['objective_metric_sign'].endswith('_global'))
# if optimizer_class is an instance, use it as is.
if type(optimizer_class) != type:
self.optimizer = optimizer_class
else:
self.optimizer = optimizer_class(
base_task_id=opts['base_task_id'], hyper_parameters=hyper_parameters,
objective_metric=self.objective_metric, execution_queue=opts['execution_queue'],
num_concurrent_workers=opts['max_number_of_concurrent_tasks'],
compute_time_limit=opts['compute_time_limit'], **opts.get('optimizer_kwargs', {}))
self.optimizer.set_optimizer_task(self._task)
self.optimization_timeout = None
self.optimization_start_time = None
self._thread = None
self._stop_event = None
self._report_period_min = 5.
self._thread_reporter = None
self._experiment_completed_cb = None
self._save_top_k_tasks_only = max(0, save_top_k_tasks_only or 0)
self.optimizer.set_job_default_parent(
self._task.id if self._task else None, project_name=spawn_project or None)
self.set_time_limit(in_minutes=opts['optimization_time_limit'])
def get_num_active_experiments(self):
# type: () -> int
"""
Return the number of current active experiments.
:return: The number of active experiments.
"""
if not self.optimizer:
return 0
return len(self.optimizer.get_running_jobs())
def get_active_experiments(self):
# type: () -> Sequence[Task]
"""
Return a list of Tasks of the current active experiments.
:return: A list of Task objects, representing the current active experiments.
"""
if not self.optimizer:
return []
return [j.task for j in self.optimizer.get_running_jobs()]
def start(self, job_complete_callback=None):
# type: (Optional[Callable[[str, float, int, dict, str], None]]) -> bool
"""
Start the HyperParameterOptimizer controller. If the calling process is stopped, then the controller stops
as well.
:param Callable job_complete_callback: Callback function, called when a job is completed.
.. code-block:: py
def job_complete_callback(
job_id, # type: str
objective_value, # type: float
objective_iteration, # type: int
job_parameters, # type: dict
top_performance_job_id # type: str
):
pass
:return: True, if the controller started. False, if the controller did not start.
"""
if not self.optimizer:
return False
if self._thread:
return True
self.optimization_start_time = time()
self._experiment_completed_cb = job_complete_callback
self._stop_event = Event()
self._thread = Thread(target=self._daemon)
self._thread.daemon = True
self._thread.start()
self._thread_reporter = Thread(target=self._report_daemon)
self._thread_reporter.daemon = True
self._thread_reporter.start()
return True
def stop(self, timeout=None, wait_for_reporter=True):
# type: (Optional[float], Optional[bool]) -> ()
"""
Stop the HyperParameterOptimizer controller and the optimization thread.
:param float timeout: Wait timeout for the optimization thread to exit (minutes).
            The default is ``None``, indicating do not wait; terminate immediately.
:param wait_for_reporter: Wait for reporter to flush data.
"""
if not self._thread or not self._stop_event or not self.optimizer:
if self._thread_reporter and wait_for_reporter:
self._thread_reporter.join()
return
_thread = self._thread
self._stop_event.set()
self.optimizer.stop()
# wait for optimizer thread
if timeout is not None:
_thread.join(timeout=timeout * 60.)
# stop all running tasks:
for j in self.optimizer.get_running_jobs():
j.abort()
# clear thread
self._thread = None
if wait_for_reporter:
# wait for reporter to flush
self._thread_reporter.join()
def is_active(self):
# type: () -> bool
"""
Is the optimization procedure active (still running)
The values are:
- ``True`` - The optimization procedure is active (still running).
- ``False`` - The optimization procedure is not active (not still running).
.. note::
If the daemon thread has not yet started, ``is_active`` returns ``True``.
:return: A boolean indicating whether the optimization procedure is active (still running) or stopped.
"""
return self._stop_event is None or self._thread is not None
def is_running(self):
# type: () -> bool
"""
        Is the optimization controller running
        The values are:
        - ``True`` - The optimization procedure is running.
        - ``False`` - The optimization procedure is not running.
:return: A boolean indicating whether the optimization procedure is active (still running) or stopped.
"""
return self._thread is not None
def wait(self, timeout=None):
# type: (Optional[float]) -> bool
"""
Wait for the optimizer to finish.
.. note::
This method does not stop the optimizer. Call :meth:`stop` to terminate the optimizer.
:param float timeout: The timeout to wait for the optimization to complete (minutes).
If ``None``, then wait until we reached the timeout, or optimization completed.
:return: True, if the optimization finished. False, if the optimization timed out.
"""
if not self.is_running():
return True
if timeout is not None:
timeout *= 60.
else:
timeout = max(0, self.optimization_timeout - self.optimization_start_time) \
if self.optimization_timeout else None
_thread = self._thread
_thread.join(timeout=timeout)
if _thread.is_alive():
return False
return True
def set_time_limit(self, in_minutes=None, specific_time=None):
# type: (Optional[float], Optional[datetime]) -> ()
"""
Set a time limit for the HyperParameterOptimizer controller. If we reached the time limit, stop the optimization
process. If ``specific_time`` is provided, use it; otherwise, use the ``in_minutes``.
:param float in_minutes: The maximum processing time from current time (minutes).
:param datetime specific_time: The specific date/time limit.
"""
if specific_time:
self.optimization_timeout = specific_time.timestamp()
else:
self.optimization_timeout = (float(in_minutes) * 60.) + time() if in_minutes else None
def get_time_limit(self):
# type: () -> datetime
"""
Return the controller optimization time limit.
:return: The absolute datetime limit of the controller optimization process.
"""
return datetime.fromtimestamp(self.optimization_timeout)
def elapsed(self):
# type: () -> float
"""
        Return minutes elapsed from the controller start time stamp.
:return: The minutes from controller start time. A negative value means the process has not started yet.
"""
if self.optimization_start_time is None:
return -1.0
return (time() - self.optimization_start_time) / 60.
def reached_time_limit(self):
# type: () -> bool
"""
Did the optimizer reach the time limit
The values are:
- ``True`` - The time limit passed.
- ``False`` - The time limit did not pass.
This method returns immediately, it does not wait for the optimizer.
:return: True, if optimizer is running and we passed the time limit, otherwise returns False.
"""
if self.optimization_start_time is None:
return False
if not self.is_running():
return False
        if not self.optimization_timeout:
            return False
        return time() > self.optimization_timeout
def get_top_experiments(self, top_k):
# type: (int) -> Sequence[Task]
"""
Return a list of Tasks of the top performing experiments, based on the controller ``Objective`` object.
:param int top_k: The number of Tasks (experiments) to return.
:return: A list of Task objects, ordered by performance, where index 0 is the best performing Task.
"""
if not self.optimizer:
return []
return self.optimizer.get_top_experiments(top_k=top_k)
def get_optimizer(self):
# type: () -> SearchStrategy
"""
Return the currently used optimizer object.
:return: The SearchStrategy object used.
"""
return self.optimizer
def set_default_job_class(self, job_class):
# type: (TrainsJob) -> ()
"""
Set the Job class to use when the optimizer spawns new Jobs.
:param TrainsJob job_class: The Job Class type.
"""
self.optimizer.set_job_class(job_class)
def set_report_period(self, report_period_minutes):
# type: (float) -> ()
"""
Set reporting period for the accumulated objective report (minutes). This report is sent on the Optimizer Task,
and collects the Objective metric from all running jobs.
:param float report_period_minutes: The reporting period (minutes). The default is once every 10 minutes.
"""
self._report_period_min = float(report_period_minutes)
@classmethod
def get_optimizer_top_experiments(
cls,
objective_metric_title, # type: str
objective_metric_series, # type: str
objective_metric_sign, # type: str
optimizer_task_id, # type: str
top_k, # type: int
):
# type: (...) -> Sequence[Task]
"""
Return a list of Tasks of the top performing experiments
for a specific HyperParameter Optimization session (i.e. Task ID), based on the title/series objective.
:param str objective_metric_title: The Objective metric title to maximize / minimize (for example,
``validation``).
:param str objective_metric_series: The Objective metric series to maximize / minimize (for example, ``loss``).
:param str objective_metric_sign: The objective to maximize / minimize.
The values are:
- ``min`` - Minimize the last reported value for the specified title/series scalar.
- ``max`` - Maximize the last reported value for the specified title/series scalar.
- ``min_global`` - Minimize the min value of *all* reported values for the specific title/series scalar.
- ``max_global`` - Maximize the max value of *all* reported values for the specific title/series scalar.
:param str optimizer_task_id: Parent optimizer Task ID
:param top_k: The number of Tasks (experiments) to return.
:return: A list of Task objects, ordered by performance, where index 0 is the best performing Task.
"""
objective = Objective(
title=objective_metric_title, series=objective_metric_series, order=objective_metric_sign)
return objective.get_top_tasks(top_k=top_k, optimizer_task_id=optimizer_task_id)
def _connect_args(self, optimizer_class=None, hyper_param_configuration=None, **kwargs):
# type: (SearchStrategy, dict, Any) -> (SearchStrategy, list, dict)
if not self._task:
logger.warning('Auto Connect turned on but no Task was found, '
'hyper-parameter optimization argument logging disabled')
return optimizer_class, hyper_param_configuration, kwargs
configuration_dict = {'parameter_optimization_space': [c.to_dict() for c in hyper_param_configuration]}
self._task.connect_configuration(configuration_dict)
# this is the conversion back magic:
configuration_dict = {'parameter_optimization_space': [
Parameter.from_dict(c) for c in configuration_dict['parameter_optimization_space']]}
complex_optimizer_kwargs = None
if 'optimizer_kwargs' in kwargs:
# do not store complex optimizer kwargs:
optimizer_kwargs = kwargs.pop('optimizer_kwargs', {})
complex_optimizer_kwargs = {
k: v for k, v in optimizer_kwargs.items()
if not isinstance(v, six.string_types + six.integer_types +
(six.text_type, float, list, tuple, dict, type(None)))}
kwargs['optimizer_kwargs'] = {
k: v for k, v in optimizer_kwargs.items() if k not in complex_optimizer_kwargs}
# skip non basic types:
arguments = {'opt': kwargs}
if type(optimizer_class) != type:
logger.warning('Auto Connect optimizer_class disabled, {} is already instantiated'.format(optimizer_class))
self._task.connect(arguments)
else:
arguments['opt']['optimizer_class'] = str(optimizer_class).split('.')[-1][:-2] \
if not isinstance(optimizer_class, str) else optimizer_class
self._task.connect(arguments)
# this is the conversion back magic:
original_class = optimizer_class
optimizer_class = arguments['opt'].pop('optimizer_class', None)
if optimizer_class == 'RandomSearch':
optimizer_class = RandomSearch
elif optimizer_class == 'GridSearch':
optimizer_class = GridSearch
elif optimizer_class == 'OptimizerBOHB':
from .hpbandster import OptimizerBOHB
optimizer_class = OptimizerBOHB
elif optimizer_class == 'OptimizerOptuna':
from .optuna import OptimizerOptuna
optimizer_class = OptimizerOptuna
else:
logger.warning("Could not resolve optimizer_class {} reverting to original class {}".format(
optimizer_class, original_class))
optimizer_class = original_class
if complex_optimizer_kwargs:
if 'optimizer_kwargs' not in arguments['opt']:
arguments['opt']['optimizer_kwargs'] = complex_optimizer_kwargs
else:
arguments['opt']['optimizer_kwargs'].update(complex_optimizer_kwargs)
return optimizer_class, configuration_dict['parameter_optimization_space'], arguments['opt']
def _daemon(self):
# type: () -> ()
"""
Implement the main polling thread, invoking the optimizer loop every ``self.pool_period_minutes`` minutes.
"""
self.optimizer.start()
self._thread = None
def _report_daemon(self):
# type: () -> ()
title, series = self.objective_metric.get_objective_metric()
title = '{}/{}'.format(title, series)
counter = 0
completed_jobs = dict()
task_logger = None
cur_completed_jobs = set()
cur_task = self._task or Task.current_task()
if cur_task and self.optimizer:
# noinspection PyProtectedMember
child_tasks = self.optimizer._get_child_tasks(
parent_task_id=cur_task.id, status=['completed', 'stopped'])
hyper_parameters = [h.name for h in self.hyper_parameters]
for task in child_tasks:
params = {k: v for k, v in task.get_parameters().items() if k in hyper_parameters}
params["status"] = str(task.status)
# noinspection PyProtectedMember
iteration_value = task.get_last_iteration()
objective = self.objective_metric.get_objective(task)
completed_jobs[task.id] = (
objective if objective is not None else -1,
iteration_value if iteration_value is not None else -1,
params
)
while self._thread is not None:
timeout = self.optimization_timeout - time() if self.optimization_timeout else 0.
if timeout >= 0:
timeout = min(self._report_period_min * 60., timeout if timeout else self._report_period_min * 60.)
# make sure that we have the first report fired before we actually go to sleep, wait for 15 sec.
if counter <= 0:
timeout = 15
print('Progress report #{} completed, sleeping for {} minutes'.format(counter, timeout / 60.))
if self._stop_event.wait(timeout=timeout):
# wait for one last report
timeout = -1
counter += 1
# get task to report on.
cur_task = self._task or Task.current_task()
if cur_task:
task_logger = cur_task.get_logger()
# do some reporting
self._report_remaining_budget(task_logger, counter)
if self.optimizer.budget.compute_time.used and self.optimizer.budget.compute_time.used >= 1.0:
# Reached compute time limit
timeout = -1
self._report_resources(task_logger, counter)
# collect a summary of all the jobs and their final objective values
cur_completed_jobs = set(self.optimizer.get_created_jobs_ids().keys()) - \
{j.task_id() for j in self.optimizer.get_running_jobs()}
self._report_completed_status(completed_jobs, cur_completed_jobs, task_logger, title)
self._report_completed_tasks_best_results(set(completed_jobs.keys()), task_logger, title, counter)
self._auto_archive_low_performance_tasks(completed_jobs)
# if we should leave, stop everything now.
if timeout < 0:
# we should leave
self.stop(wait_for_reporter=False)
return
if task_logger and counter:
counter += 1
self._report_remaining_budget(task_logger, counter)
self._report_resources(task_logger, counter)
self._report_completed_status(completed_jobs, cur_completed_jobs, task_logger, title, force=True)
self._report_completed_tasks_best_results(set(completed_jobs.keys()), task_logger, title, counter)
self._auto_archive_low_performance_tasks(completed_jobs)
def _report_completed_status(self, completed_jobs, cur_completed_jobs, task_logger, title, force=False):
job_ids_sorted_by_objective = self.__sort_jobs_by_objective(completed_jobs)
best_experiment = \
(self.objective_metric.get_normalized_objective(job_ids_sorted_by_objective[0]),
job_ids_sorted_by_objective[0]) \
if job_ids_sorted_by_objective else (float('-inf'), None)
if force or cur_completed_jobs != set(completed_jobs.keys()):
pairs = []
labels = []
created_jobs = copy(self.optimizer.get_created_jobs_ids())
id_status = {j_id: j_run.status() for j_id, j_run in self.optimizer.get_created_jobs_tasks().items()}
for i, (job_id, params) in enumerate(created_jobs.items()):
value = self.objective_metric.get_objective(job_id)
if job_id in completed_jobs:
if value != completed_jobs[job_id][0]:
iteration_value = self.objective_metric.get_current_raw_objective(job_id)
completed_jobs[job_id] = (
value,
iteration_value[0] if iteration_value else -1,
copy(dict(**params, **{"status": id_status.get(job_id)}))) # noqa
elif completed_jobs.get(job_id):
completed_jobs[job_id] = (completed_jobs[job_id][0],
completed_jobs[job_id][1],
copy(dict(**params, **{"status": id_status.get(job_id)}))) # noqa
pairs.append((i, completed_jobs[job_id][0]))
labels.append(str(completed_jobs[job_id][2])[1:-1])
elif value is not None:
pairs.append((i, value))
labels.append(str(params)[1:-1])
iteration_value = self.objective_metric.get_current_raw_objective(job_id)
completed_jobs[job_id] = (
value,
iteration_value[0] if iteration_value else -1,
copy(dict(**params, **{"status": id_status.get(job_id)}))) # noqa
# callback new experiment completed
if self._experiment_completed_cb:
normalized_value = self.objective_metric.get_normalized_objective(job_id)
if normalized_value is not None and normalized_value > best_experiment[0]:
best_experiment = normalized_value, job_id
c = completed_jobs[job_id]
self._experiment_completed_cb(job_id, c[0], c[1], c[2], best_experiment[1])
if pairs:
print('Updating job performance summary plot/table')
# update scatter plot
task_logger.report_scatter2d(
title='Optimization Objective', series=title,
scatter=pairs, iteration=0, labels=labels,
mode='markers', xaxis='job #', yaxis='objective')
# update summary table
job_ids = list(completed_jobs.keys())
job_ids_sorted_by_objective = sorted(
job_ids, key=lambda x: completed_jobs[x][0], reverse=bool(self.objective_metric.sign >= 0))
# sort the columns except for 'objective', 'iteration'
columns = list(sorted(set([c for k, v in completed_jobs.items() for c in v[2].keys()])))
# add the index column (task id) and the first two columns 'objective', 'iteration' then the rest
table_values = [['task id', 'objective', 'iteration'] + columns]
table_values += \
[([job, completed_jobs[job][0], completed_jobs[job][1]] +
[completed_jobs[job][2].get(c, '') for c in columns]) for job in job_ids_sorted_by_objective]
task_logger.report_table(
"summary", "job", 0, table_plot=table_values,
extra_layout={"title": "objective: {}".format(title)})
# Build parallel Coordinates: convert to columns, and reorder accordingly
if len(table_values) > 1:
table_values_columns = [[row[i] for row in table_values] for i in range(len(table_values[0]))]
table_values_columns = \
[[table_values_columns[0][0]] + [c[:6]+'...' for c in table_values_columns[0][1:]]] + \
table_values_columns[2:-1] + [[title]+table_values_columns[1][1:]]
pcc_dims = []
for col in table_values_columns:
# test if all values are numbers:
try:
# try to cast all values to float
values = [float(v) for v in col[1:]]
d = dict(label=col[0], values=values)
except (ValueError, TypeError):
values = list(range(len(col[1:])))
ticks = col[1:]
d = dict(label=col[0], values=values, tickvals=values, ticktext=ticks)
pcc_dims.append(d)
# report parallel coordinates
plotly_pcc = dict(
data=[dict(
type='parcoords',
line=dict(colorscale='Viridis',
reversescale=bool(self.objective_metric.sign >= 0),
color=table_values_columns[-1][1:]),
dimensions=pcc_dims)],
layout={})
task_logger.report_plotly(
title='Parallel Coordinates', series='',
iteration=0, figure=plotly_pcc)
# upload summary as artifact
if force:
task = self._task or Task.current_task()
if task:
task.upload_artifact(name='summary', artifact_object={'table': table_values})
def _report_remaining_budget(self, task_logger, counter):
# noinspection PyBroadException
try:
budget = self.optimizer.budget.to_dict()
except Exception:
budget = {}
# report remaining budget
for budget_part, value in budget.items():
task_logger.report_scalar(
title='remaining budget', series='{} %'.format(budget_part),
iteration=counter, value=round(100 - value['used'] * 100., ndigits=1))
if self.optimization_timeout and self.optimization_start_time:
task_logger.report_scalar(
title='remaining budget', series='time %',
iteration=counter,
value=round(100 - (100. * (time() - self.optimization_start_time) /
(self.optimization_timeout - self.optimization_start_time)), ndigits=1)
)
def _report_completed_tasks_best_results(self, completed_jobs, task_logger, title, counter):
# type: (Set[str], Logger, str, int) -> ()
if not completed_jobs:
return
value_func, series_name = (max, "max") if self.objective_metric.get_objective_sign() > 0 else \
(min, "min")
latest_completed, obj_values = self._get_latest_completed_task_value(completed_jobs, series_name)
if latest_completed:
val = value_func(obj_values)
task_logger.report_scalar(
title=title,
series=series_name,
iteration=counter,
value=val)
task_logger.report_scalar(
title=title,
series="last reported",
iteration=counter,
value=latest_completed)
def _report_resources(self, task_logger, iteration):
# type: (Logger, int) -> ()
self._report_active_workers(task_logger, iteration)
self._report_tasks_status(task_logger, iteration)
def _report_active_workers(self, task_logger, iteration):
# type: (Logger, int) -> ()
res = self.__get_session().send(workers_service.GetAllRequest())
response = res.wait()
if response.ok():
all_workers = response
queue_workers = len(
[
worker.get("id")
for worker in all_workers.response_data.get("workers")
for q in worker.get("queues")
if q.get("name") == self.execution_queue
]
)
task_logger.report_scalar(title="resources",
series="queue workers",
iteration=iteration,
value=queue_workers)
def _report_tasks_status(self, task_logger, iteration):
# type: (Logger, int) -> ()
tasks_status = {"running tasks": 0, "pending tasks": 0}
for job in self.optimizer.get_running_jobs():
if job.is_running():
tasks_status["running tasks"] += 1
else:
tasks_status["pending tasks"] += 1
for series, val in tasks_status.items():
task_logger.report_scalar(
title="resources", series=series,
iteration=iteration, value=val)
def _get_latest_completed_task_value(self, cur_completed_jobs, series_name):
# type: (Set[str], str) -> (float, List[float])
completed_value = None
latest_completed = None
obj_values = []
cur_task = self._task or Task.current_task()
for j in cur_completed_jobs:
res = cur_task.send(tasks_service.GetByIdRequest(task=j))
response = res.wait()
if not response.ok() or response.response_data["task"].get("status") != Task.TaskStatusEnum.completed:
continue
completed_time = datetime.strptime(response.response_data["task"]["completed"].partition("+")[0],
"%Y-%m-%dT%H:%M:%S.%f")
completed_time = completed_time.timestamp()
completed_values = self._get_last_value(response)
obj_values.append(completed_values['max_value'] if series_name == "max" else completed_values['min_value'])
if not latest_completed or completed_time > latest_completed:
latest_completed = completed_time
completed_value = completed_values['value']
return completed_value, obj_values
def _get_last_value(self, response):
metrics, title, series, values = TrainsJob.get_metric_req_params(self.objective_metric.title,
self.objective_metric.series)
last_values = response.response_data["task"]['last_metrics'][title][series]
return last_values
def _auto_archive_low_performance_tasks(self, completed_jobs):
if self._save_top_k_tasks_only <= 0:
return
# sort based on performance
job_ids_sorted_by_objective = self.__sort_jobs_by_objective(completed_jobs)
# query system_tags only
res = self.__get_session().send(tasks_service.GetAllRequest(
id=job_ids_sorted_by_objective, status=['completed', 'stopped'], only_fields=['id', 'system_tags']))
response = res.wait()
if not response.ok():
return
tasks_system_tags_lookup = {
task.get("id"): task.get("system_tags") for task in response.response_data.get("tasks")}
for i, task_id in enumerate(job_ids_sorted_by_objective):
system_tags = tasks_system_tags_lookup.get(task_id, [])
if i < self._save_top_k_tasks_only and Task.archived_tag in system_tags:
print('Restoring from archive Task id={} (#{} objective={})'.format(
task_id, i, completed_jobs[task_id][0]))
# top_k task and is archived, remove archive tag
system_tags = list(set(system_tags) - {Task.archived_tag})
res = self.__get_session().send(
tasks_service.EditRequest(task=task_id, system_tags=system_tags, force=True))
res.wait()
elif i >= self._save_top_k_tasks_only and Task.archived_tag not in system_tags:
print('Archiving Task id={} (#{} objective={})'.format(
task_id, i, completed_jobs[task_id][0]))
# Not in top_k task and not archived, add archive tag
system_tags = list(set(system_tags) | {Task.archived_tag})
res = self.__get_session().send(
tasks_service.EditRequest(task=task_id, system_tags=system_tags, force=True))
res.wait()
def __get_session(self):
cur_task = self._task or Task.current_task()
if cur_task:
return cur_task.default_session
# noinspection PyProtectedMember
return Task._get_default_session()
def __sort_jobs_by_objective(self, completed_jobs):
if not completed_jobs:
return []
job_ids_sorted_by_objective = list(sorted(
completed_jobs.keys(), key=lambda x: completed_jobs[x][0], reverse=bool(self.objective_metric.sign >= 0)))
return job_ids_sorted_by_objective
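# --- Illustrative sketch, not part of the optimizer implementation above ---
# The reporting helpers keep ``completed_jobs`` as a mapping of
# ``{task_id: (objective_value, last_iteration, params_dict)}`` and order the
# task ids by objective value: descending when the objective sign is >= 0
# (maximize) and ascending otherwise, mirroring __sort_jobs_by_objective().
# The task ids and values below are made up for demonstration only.
if __name__ == '__main__':
    _sample_completed_jobs = {
        'task-a': (0.91, 120, {'lr': 0.01, 'status': 'completed'}),
        'task-b': (0.87, 100, {'lr': 0.05, 'status': 'completed'}),
        'task-c': (0.95, 140, {'lr': 0.001, 'status': 'completed'}),
    }
    _maximize = True  # stands in for bool(objective_metric.sign >= 0)
    _sorted_ids = sorted(
        _sample_completed_jobs, key=lambda t: _sample_completed_jobs[t][0], reverse=_maximize)
    print(_sorted_ids)  # -> ['task-c', 'task-a', 'task-b'] when maximizing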
|
robotiq2f140_real.py
|
import socket
import struct
import threading
import time
from subprocess import check_output
import numpy as np
import rospy
from airobot.ee_tool.ee import EndEffectorTool
from airobot.utils.common import clamp, print_red
from airobot.utils.urscript_util import Robotiq2F140URScript
from control_msgs.msg import GripperCommandActionGoal
from sensor_msgs.msg import JointState
from std_msgs.msg import String
class Robotiq2F140Real(EndEffectorTool):
"""
Class for interfacing with the Robotiq 2F140 gripper when
it is attached to a UR5e arm. Communication with the gripper
is either through ROS or through a TCP/IP socket.
Args:
cfgs (YACS CfgNode): configurations for the gripper.
Attributes:
cfgs (YACS CfgNode): configurations for the gripper.
jnt_names (list): list of joint names of the gripper.
"""
def __init__(self, cfgs):
super(Robotiq2F140Real, self).__init__(cfgs=cfgs)
self.jnt_names = [
'finger_joint', 'left_inner_knuckle_joint',
'left_inner_finger_joint', 'right_outer_knuckle_joint',
'right_inner_knuckle_joint', 'right_inner_finger_joint'
]
self._gazebo_sim = rospy.get_param('sim')
self._comm_initialized = False
self._get_state_lock = threading.RLock()
self._initialize_comm()
if not self._gazebo_sim:
self._gripper_data = None
self._pub_state_lock = threading.RLock()
self._updated_gripper_pos = JointState()
self._updated_gripper_pos.name = ['finger_joint']
self._updated_gripper_pos.position = [0.0]
self._err_thresh = 1
self._local_ip_addr = None
local_ip = self._get_local_ip()
# we assume the machine is connected to a router
if local_ip is not None:
self._local_ip_addr = local_ip
else:
raise ValueError('Could not get local ip address')
self._get_current_pos_urscript()
self._pub_gripper_thread = threading.Thread(
target=self._pub_pos_target)
self._pub_gripper_thread.daemon = True
self._pub_gripper_thread.start()
def activate(self):
"""
Method to activate the gripper.
"""
if not self._gazebo_sim:
urscript = self._get_new_urscript()
urscript.set_activate()
urscript.set_gripper_speed(self.cfgs.EETOOL.DEFAULT_SPEED)
urscript.sleep(0.1)
self._pub_command.publish(urscript())
time.sleep(3)
if not self._gazebo_sim:
self._get_current_pos_urscript()
def set_pos(self, pos):
"""
Set the gripper position. Function internally maps
values from API position range to URScript position
range. After sending position command, update internal
position variable by sending urscript program to
controller.
Args:
pos (float): Desired gripper position.
"""
pos = clamp(
pos,
self.cfgs.EETOOL.OPEN_ANGLE,
self.cfgs.EETOOL.CLOSE_ANGLE
)
if not self._gazebo_sim:
urscript = self._get_new_urscript()
pos = int(pos * self.cfgs.EETOOL.POSITION_SCALING)
urscript.set_gripper_position(pos)
urscript.sleep(0.1)
gripper_cmd = urscript()
else:
gripper_cmd = GripperCommandActionGoal()
gripper_cmd.goal.command.position = pos
self._pub_command.publish(gripper_cmd)
time.sleep(1.0)
if not self._gazebo_sim:
self._get_current_pos_urscript()
def set_speed(self, speed):
"""
Set the default speed which the gripper should move at.
Args:
speed (int): Desired gripper speed (0 min, 255 max).
"""
speed = int(clamp(speed, 0, 255))
if not self._gazebo_sim:
urscript = self._get_new_urscript()
urscript.set_gripper_speed(speed)
urscript.sleep(0.1)
self._pub_command.publish(urscript())
def open(self):
"""
Open gripper.
"""
self.set_pos(self.cfgs.EETOOL.OPEN_ANGLE)
def close(self):
"""
Close gripper.
"""
self.set_pos(self.cfgs.EETOOL.CLOSE_ANGLE)
def get_pos(self):
"""
Get the current position of the gripper.
"""
self._get_state_lock.acquire()
pos = self._gripper_data
self._get_state_lock.release()
return pos
def _get_current_pos_cb(self, msg):
"""
Callback for rospy subscriber to get joint information.
Args:
msg (JointState): Contains the full joint state topic
published.
"""
if 'finger_joint' in msg.name:
idx = msg.name.index('finger_joint')
if idx < len(msg.position):
self._get_state_lock.acquire()
self._gripper_data = msg.position[idx]
self._get_state_lock.release()
def _get_new_urscript(self):
"""
Internal method used to create an empty URScript
program, which is filled with URScript commands and
eventually sent to the robot over one of the communication
interfaces.
"""
urscript = Robotiq2F140URScript(
socket_host=self.cfgs.EETOOL.SOCKET_HOST,
socket_port=self.cfgs.EETOOL.SOCKET_PORT,
socket_name=self.cfgs.EETOOL.SOCKET_NAME)
urscript.sleep(0.1)
return urscript
def _get_current_pos_urscript(self):
"""
Function to send a URScript message to the robot that updates
the gripper position value. The URScript program opens a socket
connection back to this machine, and a corresponding local socket
is created to receive the incoming data. The value is only updated
after the gripper has stopped moving (detected by the received
value staying constant over several consecutive reads) and is then
published to the gripper state topic. The function exits if the
timeout is reached.
"""
if self._gazebo_sim:
return
tcp_port = 50201
tcp_msg = 'def process():\n'
tcp_msg += ' socket_open("127.0.0.1",63352,"gripper_socket")\n'
tcp_msg += ' rq_pos = socket_get_var("POS","gripper_socket")\n'
tcp_msg += ' sync()\n'
tcp_msg += ' textmsg("value = ",rq_pos)\n'
tcp_msg += ' socket_open("%s",%d,"desktop_socket")\n' % \
(self._local_ip_addr, tcp_port)
tcp_msg += ' socket_send_int(rq_pos,"desktop_socket")\n'
tcp_msg += ' sync()\n'
tcp_msg += ' socket_close("desktop_socket")\n'
tcp_msg += 'end\n'
self._pub_command.publish(tcp_msg)
returned_pos = None
last_returned_pos = 0.0
gripper_stopped = False
check_equal_pos = 10
equal_pos = 0
start = time.time()
hostname = socket.gethostbyname('0.0.0.0')
buffer_size = 1024
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.settimeout(self.cfgs.EETOOL.UPDATE_TIMEOUT)
s.bind((hostname, tcp_port))
while returned_pos is None and not gripper_stopped:
if time.time() - start > self.cfgs.EETOOL.UPDATE_TIMEOUT:
prnt_str = 'Unable to update gripper position value in %f' \
' s, exiting' % self.cfgs.EETOOL.UPDATE_TIMEOUT
print_red(prnt_str)
s.close()
return
try:
s.listen(1)
conn, _ = s.accept()
except socket.timeout:
prnt_str = 'Unable to accept from socket in %f' \
' s, exiting' % self.cfgs.EETOOL.UPDATE_TIMEOUT
print_red(prnt_str)
s.close()
return
data = conn.recv(buffer_size)
if not data:
continue
returned_pos = int(struct.unpack('!i', data[0:4])[0])
if np.abs(returned_pos - last_returned_pos) < self._err_thresh:
equal_pos += 1
else:
equal_pos = 0
if equal_pos >= check_equal_pos:
gripper_stopped = True
last_returned_pos = returned_pos
self._pub_state_lock.acquire()
self._updated_gripper_pos.position[0] = returned_pos
self._pub_state_lock.release()
s.close()
def _pub_pos_target(self):
"""
Function to run in background thread to publish updated
gripper state.
"""
while not rospy.is_shutdown():
try:
self._pub_state_lock.acquire()
self._pub_gripper_pos.publish(self._updated_gripper_pos)
self._pub_state_lock.release()
time.sleep(0.002)
except rospy.ROSException:
pass
def _get_local_ip(self):
"""
Function to get machine ip address on local network.
Returns:
str: Local IP address
"""
raw_ips = check_output(['hostname', '--all-ip-addresses'])
ips = raw_ips.decode('utf8')
ip_list = ips.split()
for ip in ip_list:
if ip.startswith(self.cfgs.EETOOL.IP_PREFIX):
return ip
return None
def _initialize_comm(self):
"""
Set up the internal publisher to send gripper command
URScript programs to the robot through ROS.
"""
if self._gazebo_sim:
self._pub_command = rospy.Publisher(
self.cfgs.EETOOL.GAZEBO_COMMAND_TOPIC,
GripperCommandActionGoal,
queue_size=10)
else:
self._pub_command = rospy.Publisher(
self.cfgs.EETOOL.COMMAND_TOPIC,
String,
queue_size=10)
self._pub_gripper_pos = rospy.Publisher(
'/gripper_state',
JointState,
queue_size=10)
self._sub_position = rospy.Subscriber(
self.cfgs.EETOOL.JOINT_STATE_TOPIC,
JointState,
self._get_current_pos_cb
)
time.sleep(1.0)
self._comm_initialized = True
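# --- Standalone sketch, no robot required ---
# This mirrors the position-query handshake used by _get_current_pos_urscript()
# above: the URScript side pushes the raw gripper position as a network-order
# integer over TCP, and the Python side listens, accepts and unpacks the first
# four bytes. Here a local thread plays the URScript side; the port and the
# value 112 are placeholders. Relies on the module-level socket/struct/threading
# imports above.
def _demo_position_handshake(port=50201):
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    srv.bind(('127.0.0.1', port))
    srv.listen(1)

    def _fake_urscript_sender():
        cli = socket.create_connection(('127.0.0.1', port))
        cli.send(struct.pack('!i', 112))  # pretend raw gripper position
        cli.close()

    threading.Thread(target=_fake_urscript_sender).start()
    conn, _ = srv.accept()
    data = conn.recv(1024)
    pos = int(struct.unpack('!i', data[0:4])[0])
    conn.close()
    srv.close()
    return pos  # -> 112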
|
echo_server.py
|
import socket
import threading
import time
def tcplink(sock, addr):
print('Accept new connection from %s:%s...' % addr)
sock.send(b'Welcome!')
while True:
data = sock.recv(1024)
time.sleep(1)
if not data or data.decode('utf-8') == 'exit':
break
sock.send(('Hello, %s!' % data.decode('utf-8')).encode('utf-8'))
sock.close()
print('Connection from %s:%s closed.' % addr)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# listen on a local port:
s.bind(('127.0.0.1', 9999))
s.listen(5)
print('Waiting for connection...')
while True:
# accept a new connection:
sock, addr = s.accept()
# spawn a new thread to handle the TCP connection:
t = threading.Thread(target=tcplink, args=(sock, addr))
t.start()
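# Companion client sketch for the echo server above (run it in a separate
# process while the server is listening on 127.0.0.1:9999): it reads the
# welcome banner, sends a few names, prints the echoed replies and finally
# sends 'exit' so the server-side handler thread terminates.
def echo_client():
    c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    c.connect(('127.0.0.1', 9999))
    print(c.recv(1024).decode('utf-8'))  # 'Welcome!'
    for name in (b'Michael', b'Tracy', b'Sarah'):
        c.send(name)
        print(c.recv(1024).decode('utf-8'))  # 'Hello, <name>!'
    c.send(b'exit')
    c.close()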
|
PakeMail_sandbox.py
|
from io import StringIO
from email.mime.base import MIMEBase
from email.message import Message
import base64
import mimetypes
import os
from spake2 import SPAKE2_A
from spake2 import SPAKE2_B
from spake2.parameters.i1024 import Params1024
from spake2.parameters.i2048 import Params2048
from spake2.parameters.i3072 import Params3072
from pakemail import PakeMail, PakeMailService
from pakemod import PakeClient,Roles,Parameters
import pakemod
import nacl.secret, nacl.utils
import sys
from threading import Thread
import getpass, time, timeit
import gnupg
def generateKeyPairForEmail():
email = input("Please enter an email address:")
phrase = getpass.getpass("Please enter a passphrase:")
gpg = pakemod.getGpgHandler()
key_data = gpg.gen_key_input(key_type="RSA", key_length=2048, name_email=email, passphrase=phrase)
gpg.gen_key(key_data)
def runCryptoExperiment(pake_key, message):
print("-------------------------------------------------------------------")
print("Encrypting and decrypting a message using the PAKE-generated key...")
secret_box = nacl.secret.SecretBox(pake_key)
print("The following message will be encrypted:")
print(message.decode('utf-8'))
encrypted = secret_box.encrypt(message)
print("The obtained ciphertext is\n", encrypted)
print("The decrypted plaintext is:")
plaintext = secret_box.decrypt(encrypted)
print(plaintext.decode('utf-8'))
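# Minimal standalone sketch of the PyNaCl secret-box pattern used in
# runCryptoExperiment above, with a freshly generated random key instead of a
# PAKE-derived one (SecretBox requires a 32-byte key, which is what
# runCryptoExperiment assumes the PAKE key derivation provides).
def runSecretBoxDemo():
    key = nacl.utils.random(nacl.secret.SecretBox.KEY_SIZE)  # 32 random bytes
    box = nacl.secret.SecretBox(key)
    ciphertext = box.encrypt(b"hello, symmetric world")
    assert box.decrypt(ciphertext) == b"hello, symmetric world"
    return ciphertext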
def run_local_pake_test():
executionTime = float(0)
start = time.process_time()
pakeClientA = PakeClient("A", "pass", "test+senderA@gmail.com", parameters=Params1024)
pakeClientB = PakeClient("B", "pass", "test+receiverB@gmail.com", parameters=Params1024)
pakeClientA.registerRemotePakeClient(pakeClientB)
pakeClientB.registerRemotePakeClient(pakeClientA)
pakeClientA.setup(localTest=True)
pakeClientB.setup(localTest=True)
pakeMsgA1 = pakeClientA.pakeMessage
pakeMsgB2 = pakeClientB.pakeMessage
pakeClientA.computeKey(pakeMsgB2)
keyA = pakeClientA.key
pakeClientB.computeKey(pakeMsgA1)
keyB = pakeClientB.key
print(base64.b64encode(keyA))
print(base64.b64encode(keyB))
print("Intermediate secret keys match: ", keyA == keyB)
print("Key confirmation starts...")
kA, aMacKeyA, aMacKeyB = pakeClientA.runKeyDerivation()
pakeClientA.computeTranscript()
macMessageA = pakeClientA.pkAfpr+pakeClientA.pkBfpr+pakeClientA.transcript
print("MAC message A:", macMessageA)
tauA = pakeClientA.computeMAC(aMacKeyA, macMessageA)
pakeClientA.createMacMsg(tauA, writeToFile=True)
print("tau_A :\n", tauA)
expected_tauB = pakeClientA.computeMAC(aMacKeyB, macMessageA)
print("expected tau_B :\n", expected_tauB)
kB, bMacKeyA, bMacKeyB = pakeClientB.runKeyDerivation()
pakeClientB.computeTranscript()
macMessageB = pakeClientB.pkAfpr+pakeClientB.pkBfpr+pakeClientB.transcript
print("MAC message B:", macMessageB)
tauB = pakeClientB.computeMAC(bMacKeyB, macMessageB)
pakeClientB.createMacMsg(tauB, writeToFile=True)
print("tau_B :\n", tauB)
expected_tauA = pakeClientB.computeMAC(bMacKeyA, macMessageB)
print("expected tau_A :\n", expected_tauA)
print("----------------------------------------------------")
print("Tags match on A side: ", tauB == expected_tauB)
print("Tags match on B side: ", tauA == expected_tauA)
print("Final secret keys are: \n{0}\n{1}\nand have length {2} and {3}".format(base64.b64encode(kA),base64.b64encode(kB),len(kA), len(kB)))
print("Final secret keys match: ", kA==kB)
runCryptoExperiment(kA, b"This plaintext will be encrypted using a PAKE-generated secret key")
executionTime = (time.process_time() - start)
print("Local PakeMail execution time: ", executionTime)
def run_pake_session_over_gmail():
senderEmail = ""
receiverEmail = ""
senderPass = ""
receiverPass = ""
senderPass = "pass"
receiverPass = "pass"
if senderEmail == "":
senderEmail = input("Please enter a sender/initiator email address:")
if receiverEmail == "":
receiverEmail = input("Please enter a receiver/responder email address:")
if senderPass == "":
senderPass = getpass.getpass("Please enter the sender PAKE password:")
if receiverPass == "":
receiverPass = getpass.getpass("Please enter the receiver PAKE password:")
executionTime = float(0)
start = time.process_time()
pakeClientA = PakeClient("A", senderPass, senderEmail)
pakeClientB = PakeClient("B", receiverPass, receiverEmail)
pakeClientA.registerRemotePakeClient(pakeClientB)
pakeClientB.registerRemotePakeClient(pakeClientA)
executionTime += (time.process_time() - start)
pakeClientA.setup()
pakeClientB.setup()
start = time.process_time()
pakeClientB.pakeMail.setID(pakeClientA.pakeMail.id, messageType="pakeMac")
pakeClientB.pakeMail.setID(pakeClientA.pakeMail.id, messageType="pakeMessage")
executionTime += (time.process_time() - start)
t1 = Thread(target = pakeClientA.runSession)
t1.start()
t2 = Thread(target = pakeClientB.runSession)
t2.start()
t1.join()
t2.join()
executionTime += pakeClientA.executionTime
print("PAKE client Thread finished after {0} seconds...exiting".format(executionTime))
def run_pake_session_as_initiator():
print("\t*** Running a PAKE client as initiator ***")
senderEmail = input("Please enter a sender/initiator email address (i.e. yours):")
receiverEmail = input("Please enter a receiver/responder email address:")
senderPass = getpass.getpass("Please enter the sender/initiator PAKE password:")
pakeClientA = PakeClient("A", senderPass, senderEmail)
pakeClientB = PakeClient("B", senderPass, receiverEmail)
pakeClientA.registerRemotePakeClient(pakeClientB)
pakeClientB.registerRemotePakeClient(pakeClientA)
pakeClientA.setup()
t1 = Thread(target = pakeClientA.runSession)
t1.start()
t1.join()
print("Initiator thread finished...exiting")
def run_pake_session_as_responder():
print("\t*** Running a PAKE client as responder ***")
senderEmail = input("Please enter a sender/initiator email address:")
receiverEmail = input("Please enter a receiver/responder email address (i.e., yours):")
receiverPass = getpass.getpass("Please enter the receiver/responder PAKE password:")
pakeClientA = PakeClient("A", receiverPass, senderEmail)
pakeClientB = PakeClient("B", receiverPass, receiverEmail)
pakeClientA.registerRemotePakeClient(pakeClientB)
pakeClientB.registerRemotePakeClient(pakeClientA)
pakeClientB.setup()
t2 = Thread(target = pakeClientB.runSession)
t2.start()
t2.join()
print("Responder thread finished...exiting")
def displayMainMenu():
choice = ''
display_title()
while choice != 'q':
choice = get_user_choice()
display_title()
if choice == '1':
run_local_pake_test()
elif choice == '2':
run_pake_session_over_gmail()
elif choice == '3':
run_pake_session_as_initiator()
elif choice == '4':
run_pake_session_as_responder()
elif choice == '5':
pakemod.run_pure_spake2_experiment()
elif choice == 'q':
quit()
print("\nThanks for the visit. Bye.")
else:
print("\nPlease choose a value between 1 and 5, or q to quit.\n")
def display_title():
os.system('clear')
print("\t*********************************************************************")
print("\t*** PAKE-based authentication and key management run over email ***")
print("\t*********************************************************************")
def get_user_choice():
print("\n[1] Run a local PakeMail session.")
print("[2] Run a PakeMail session over Gmail with initiator and responder on the same machine.")
print("[3] Run a PakeMail session as initiator over Gmail.")
print("[4] Run a PakeMail session as responder over Gmail.")
print("[5] Run a pure SPAKE2 session locally.")
print("[q] Quit.")
return input("Which scenario would you like to run? ")
def quit():
print("\nQuitting...")
if __name__ == "__main__":
displayMainMenu()
|
abstracteventwrapper.py
|
import json
import threading
__author__ = 'Daniel Puschmann'
import abc
import os
from virtualisation.misc.jsonobject import JSONObject
from messagebus.rabbitmq import RabbitMQ
from virtualisation.misc.threads import QueueThread
from virtualisation.annotation.genericannotation import GenericAnnotation
from virtualisation.triplestore.threadedtriplestoreadapter import ThreadedTriplestoreAdapter
import zipfile
class AbstractEventWrapper(object):
__metaclass__ = abc.ABCMeta
def __init__(self):
self.config = JSONObject(file(os.path.join(os.path.dirname(__file__), "..", "config.json"), "rb"))
self.host = self.config.rabbitmq.host
self.port = self.config.rabbitmq.port
# self.rabbitmqconnection, self.rabbitmqchannel = RabbitMQ.establishConnection(self.host, self.port)
self.messageBusReceiveQueue = QueueThread(handler=self.receiveEventHandler)
self.messageBusSendQueue = QueueThread(handler=self.sendAnnotatedEventHandler)
self.wrappers = []
self.splitters = None
self.annotator = GenericAnnotation()
self.exchange = RabbitMQ.exchange_annotated_event
@abc.abstractmethod
def getEventDescription(self):
"""
:return: an event description
"""
pass
@classmethod
def getFileObject(cls, currentfile, filename, mode="r"):
parent = os.path.dirname(currentfile)
if parent.endswith(".zip"):
zFile = zipfile.ZipFile(parent)
return zFile.open(filename, mode)
else:
return file(os.path.join(parent, filename), mode)
# def addWrapper(self, wrapper):
# """
# adds a wrapper to the internal wrapper list
# :param wrapper:
# :return:
# """
# if not isinstance(wrapper, AbstractEventWrapper):
# raise Exception(error="trying to add a wrapper of the wrong instance. Requires AbstractEventWRapper")
# self.wrappers.append(wrapper)
def start(self):
"@Daniel P: The ResourceManagement declares all available exchanges. I guess this is unnecessary therefore."
# RabbitMQ.declareExchange(self.rabbitmqchannel, self.exchange, _type="topic")
queue = RabbitMQ.channel.queue_declare()
queue_name = queue.method.queue
# in the following line the exchange should be RabbitMQ.exchange_event
RabbitMQ.channel.queue_bind(exchange=self.exchange, queue=queue_name,
routing_key=self.getEventDescription().messagebus.routingKey)
def run(self):
"""
start listening on the event detection component
:return:
"""
# self.__forEachWrapper("run")
self.runthread = threading.Thread(target=self._run)
self.runthread.start()
def _run(self):
RabbitMQ.channel.basic_consume(self.receiveEventHandler, no_ack=True)
RabbitMQ.channel.start_consuming()
def receiveEventHandler(self, channel, method, properties, body):
"""
Receives messages through the message bus, annotates the event
and sends the annotated event
:param channel:
:param method:
:param properties:
:param body:
:return:
"""
event = json.loads(body)
annotatedevent = self.annotateEvent(event)
self.messageBusSendQueue.add(annotatedevent)
def annotateEvent(self, event):
"""
Annotates the event and saves the graph in the triple store
:param event:
:return: returns the annotated graph of the event
"""
graph = self.annotator.annotateEvent(event, self.getEventDescription())
ThreadedTriplestoreAdapter.getOrMake(self.getEventDescription().graphname)
return graph
def sendAnnotatedEventHandler(self, annotatedevent):
key = self.getEventDescription().messagebus.routingKey
message = annotatedevent.serialize(format='n3')
RabbitMQ.sendMessage(message, self.exchange, key)
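# Hypothetical concrete wrapper, sketched here only to illustrate the contract
# of AbstractEventWrapper: getEventDescription() must return an object exposing
# ``messagebus.routingKey`` (used by start()) and ``graphname`` (used by
# annotateEvent()). SimpleNamespace is used as a stand-in for the project's
# JSONObject-based event description; the names and routing key are made up.
class ExampleEventWrapper(AbstractEventWrapper):
    def getEventDescription(self):
        from types import SimpleNamespace
        return SimpleNamespace(
            graphname='example_events',
            messagebus=SimpleNamespace(routingKey='events.example.#'))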
|
mainprocess.py
|
import logging
import multiprocessing
import os
import signal
import sysv_ipc
from shmdemo.common import configure_logging
from shmdemo.serviceprocess import indexer
from shmdemo.workerprocess import worker
SHM_KEY = None
SHM_SIZE = 1024 * 1024 * 16
def main():
configure_logging()
logger = logging.getLogger('main')
logger.info("Start main process")
pid = os.getpid()
shmlock = multiprocessing.Lock()
shm = sysv_ipc.SharedMemory(
SHM_KEY, flags=sysv_ipc.IPC_CREX, mode=0o600, size=SHM_SIZE)
try:
shmlock.acquire()
for unused in range(4):
worker_process = multiprocessing.Process(
target=worker, args=(pid, shm.key, shmlock), daemon=True)
worker_process.start()
indexer_process = multiprocessing.Process(
target=indexer, args=(pid, shm.key, shmlock), daemon=True)
indexer_process.start()
finally:
shm.detach()
try:
signal.pause()
except KeyboardInterrupt:
pass
logger.info("Stopping main process")
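# Hypothetical sketch of what a worker such as shmdemo.workerprocess.worker
# could look like (the real implementation lives in a separate module and is
# not shown here): the child process attaches to the existing System V shared
# memory segment by key, takes the shared lock before touching it, and
# detaches when done.
def _example_worker(parent_pid, shm_key, shmlock):
    shm = sysv_ipc.SharedMemory(shm_key)  # attach to the existing segment
    try:
        with shmlock:
            shm.write(b'hello from child', offset=0)
            first_bytes = shm.read(32, offset=0)
        logging.getLogger('worker').info("read back: %r", first_bytes)
    finally:
        shm.detach()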
|
mqtt_receivers.py
|
import threading
import os
import sys
from Utilities.functions import mqtt_receiving
cryptos = ['BTC', 'LTC', 'ETH', 'XRP']
output_file_path = os.path.join(os.getcwd(), 'Data', 'Output', 'Prices')
threads = list()
for crypto in cryptos:
thread = threading.Thread(target=mqtt_receiving, args=(crypto, output_file_path))
threads.append(thread)
thread.start()
for _, thread in enumerate(threads):
thread.join()
sys.exit()
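# Hypothetical sketch of what Utilities.functions.mqtt_receiving could look
# like, using the paho-mqtt 1.x client API (the real implementation is not
# shown here; the broker host, port and 'prices/<symbol>' topic scheme are
# assumptions for illustration only).
def _example_mqtt_receiving(crypto, output_path, broker='localhost', port=1883):
    import paho.mqtt.client as mqtt  # third-party dependency, only for this sketch

    def on_message(client, userdata, msg):
        # append each received payload to a per-currency file
        with open(os.path.join(output_path, crypto + '.txt'), 'a') as fh:
            fh.write(msg.payload.decode('utf-8') + '\n')

    client = mqtt.Client()
    client.on_message = on_message
    client.connect(broker, port)
    client.subscribe('prices/' + crypto)
    client.loop_forever()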
|
utils.py
|
import os
import sys
import time
import datetime
import json
import copy
import threading
import multiprocessing
import queue
import socket
import importlib
import traceback
import signal
import gc
from pandacommon.pandalogger import logger_utils
from pandaserver.config import panda_config, daemon_config
# list of signals accepted to end the main process
END_SIGNALS = [
signal.SIGINT,
signal.SIGHUP,
signal.SIGTERM,
]
# mandatory attributes of a daemon and their types
MANDATORY_ATTRS = [
('module', str),
('period', int),
('arguments', list),
]
# command to send in pipe to stop daemon worker processes
CMD_STOP = '__STOP'
# epoch datetime
EPOCH = datetime.datetime.fromtimestamp(0)
# worker process loop of daemon
def daemon_loop(dem_config, msg_queue, pipe_conn, worker_lifetime, tbuf=None):
# pid of the worker
my_pid = os.getpid()
my_full_pid = '{0}-{1}-{2}'.format(socket.getfqdn().split('.')[0], os.getpgrp(), my_pid)
# logger to log in file
base_logger = logger_utils.setup_logger('daemons')
tmp_log = logger_utils.make_logger(base_logger, 'worker_pid={pid}'.format(pid=my_pid))
tmp_log.info('daemon worker start')
# signal handler
def got_end_sig(sig, frame):
tmp_log.warning('(got signal {sig})'.format(sig=sig))
for sig in END_SIGNALS:
signal.signal(sig, got_end_sig)
# dict of all daemons and their script module object
module_map = {}
# package of daemon scripts
mod_package = getattr(daemon_config, 'package')
# start timestamp
start_ts = time.time()
# expiry time
expiry_ts = start_ts + worker_lifetime
# create taskBuffer object if not given
if tbuf is None:
# initialize cx_Oracle using dummy connection
try:
from pandaserver.taskbuffer.Initializer import initializer
initializer.init()
except Exception as e:
tmp_log.error('failed to launch initializer with {err} ; terminated'.format(
err='{0}: {1}'.format(e.__class__.__name__, e)))
return
# taskBuffer object
try:
from pandaserver.taskbuffer.TaskBuffer import taskBuffer as tbuf
tbuf.init(panda_config.dbhost, panda_config.dbpasswd, nDBConnection=1)
tmp_log.debug('taskBuffer initialized')
except Exception as e:
tmp_log.error('failed to initialize taskBuffer with {err} ; terminated'.format(
err='{0}: {1}'.format(e.__class__.__name__, e)))
return
# import module of all daemons
for dem_name, attrs in dem_config.items():
mod_name = attrs['module']
try:
the_module = importlib.import_module('.{mod}'.format(mod=mod_name), mod_package)
except Exception as e:
tmp_log.warning('for daemon {dem}, failed to import {mod} with {err} ; skipped it'.format(
dem=dem_name, mod=mod_name, err='{0}: {1}'.format(e.__class__.__name__, e)))
else:
module_map[dem_name] = the_module
tmp_log.debug('initialized, running')
# loop
while True:
# stop the worker when it reaches its lifetime
if time.time() > expiry_ts:
tmp_log.info('worker reached its lifetime, stop this worker')
break
# get command from pipe
if pipe_conn.poll():
cmd = pipe_conn.recv()
if cmd == CMD_STOP:
# got stop command, stop the process
tmp_log.info('got stop command, stop this worker')
break
else:
tmp_log.debug('got invalid command "{cmd}" ; skipped it'.format(cmd=cmd))
# clean up memory
gc.collect()
# get a message from queue
tmp_log.debug('waiting for message...')
keep_going = True
one_msg = None
while True:
try:
one_msg = msg_queue.get(timeout=5)
break
except queue.Empty:
# timeout to get from queue, check whether to keep going
if time.time() > expiry_ts:
# worker expired, do not keep going
keep_going = False
break
# keep going
if not keep_going:
continue
# process message
if one_msg in module_map and one_msg is not None:
# got a daemon name, get the module object and corresponding attributes
dem_name = one_msg
tmp_log.debug('got message of {dem}'.format(dem=dem_name))
the_module = module_map[dem_name]
attrs = dem_config[dem_name]
mod_args = attrs['arguments']
mod_argv = tuple([__file__] + mod_args)
dem_period = attrs['period']
dem_period_in_minute = dem_period/60.
is_sync = attrs['sync']
is_loop = attrs['loop']
# initialize variables
to_run_daemon = False
has_run = False
last_run_start_ts = 0
last_run_end_ts = 0
# component name in lock table
component = 'pandaD.{dem}'.format(dem=dem_name)
# whether the daemon should be synchronized among nodes
if is_sync:
# synchronized daemon, check process lock in DB
ret_val, locked_time = tbuf.checkProcessLock_PANDA(component=component, pid=my_full_pid, time_limit=dem_period_in_minute)
if ret_val:
# locked by some process on other nodes
last_run_start_ts = int((locked_time - EPOCH).total_seconds())
tmp_log.debug('found {dem} is locked by other process ; skipped it'.format(dem=dem_name))
else:
# try to get the lock
got_lock = tbuf.lockProcess_PANDA(component=component, pid=my_full_pid, time_limit=dem_period_in_minute)
if got_lock:
# got the lock
to_run_daemon = True
tmp_log.debug('got lock of {dem}'.format(dem=dem_name))
else:
# did not get lock, skip
last_run_start_ts = int(time.time())
tmp_log.debug('did not get lock of {dem} ; skipped it'.format(dem=dem_name))
else:
to_run_daemon = True
# run daemon
if to_run_daemon:
last_run_start_ts = int(time.time())
try:
if is_loop:
# keep looping the script until the daemon period is reached
tmp_log.info('{dem} start looping'.format(dem=dem_name))
start_ts = time.time()
while True:
ret_val = the_module.main(argv=mod_argv, tbuf=tbuf)
now_ts = time.time()
if not ret_val:
# daemon main function says stop the loop
break
if now_ts > start_ts + dem_period:
# longer than the period, stop the loop
break
tmp_log.info('{dem} finish looping'.format(dem=dem_name))
else:
# execute the module script with arguments
tmp_log.info('{dem} start'.format(dem=dem_name))
the_module.main(argv=mod_argv, tbuf=tbuf)
tmp_log.info('{dem} finish'.format(dem=dem_name))
except Exception as e:
# with error
tb = traceback.format_exc()
tmp_log.error('failed to run daemon {dem} with {err} ; stop this worker'.format(
dem=dem_name, err='{0}: {1}\n{2}\n'.format(e.__class__.__name__, e, tb)))
# daemon has run but failed
last_run_end_ts = int(time.time())
has_run = True
# send daemon status back to master
status_tuple = (dem_name, has_run, last_run_start_ts, last_run_end_ts)
pipe_conn.send(status_tuple)
# stop the worker
break
else:
# daemon has run
last_run_end_ts = int(time.time())
has_run = True
# send daemon status back to master
status_tuple = (dem_name, has_run, last_run_start_ts, last_run_end_ts)
pipe_conn.send(status_tuple)
# FIXME: stop and spawn a new worker after every run for now, since some scripts break the worker without raising an exception
# tmp_log.info('as script done, stop this worker')
# break
else:
# got invalid message
tmp_log.warning('got invalid message "{msg}", skipped it'.format(msg=one_msg))
# sleep
time.sleep(2**-5)
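# Illustrative sketch (not part of PanDA) of the entry point a daemon script
# module consumed by daemon_loop() must provide. In a real daemon module the
# function is simply named ``main``; for daemons configured with "loop": true,
# returning a falsy value tells the loop above to stop before the period ends.
def _example_daemon_main(argv=tuple(), tbuf=None):
    # ... do one unit of work here, optionally through the shared taskBuffer ...
    work_remaining = False
    return work_remaining  # falsy -> a looping daemon stops early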
# worker class of daemon process for PanDA server
class DaemonWorker(object):
__slots__ = (
'wid',
'parent_conn',
'child_conn',
'process',
)
# class lock
_lock = threading.Lock()
# constructor
def __init__(self, dem_config, msg_queue, worker_lifetime, tbuf=None):
# synchronized with lock
with self._lock:
self._make_pipe()
self._make_process( dem_config=dem_config,
msg_queue=msg_queue,
worker_lifetime=worker_lifetime,
tbuf=tbuf)
# make pipe connection pairs for the worker
def _make_pipe(self):
self.parent_conn, self.child_conn = multiprocessing.Pipe()
# make associated process
def _make_process(self, dem_config, msg_queue, worker_lifetime, tbuf):
args = (dem_config, msg_queue, self.child_conn, worker_lifetime, tbuf)
self.process = multiprocessing.Process(target=daemon_loop, args=args)
# start worker process
def start(self):
self.process.start()
# whether worker process is alive
def is_alive(self):
return self.process.is_alive()
# master class of main daemon process for PanDA server
class DaemonMaster(object):
# constructor
def __init__(self, logger, n_workers=1, n_dbconn=1, worker_lifetime=28800):
# logger
self.logger = logger
# number of daemon worker processes
self.n_workers = n_workers
# number of db connections for common taskBuffer interface
self.n_dbconn = n_dbconn
# lifetime of daemon worker processes
self.worker_lifetime = worker_lifetime
# locks
self._worker_lock = threading.Lock()
self._status_lock = threading.Lock()
# make message queue
self.msg_queue = multiprocessing.Queue()
# process pool
self.proc_pool = []
# worker pool
self.worker_pool = set()
# whether to stop scheduler
self.to_stop_scheduler = False
# make daemon config
self.dem_config = {}
self._parse_config()
# map of run status of daemons
self.dem_run_map = {}
self._make_dem_run_map()
# shared taskBufferIF
self.tbif = None
self._make_tbif()
# spawn workers
self._spawn_workers(self.n_workers)
# make common taskBuffer interface for daemon workers
def _make_tbif(self):
try:
from pandaserver.taskbuffer.TaskBuffer import TaskBuffer
from pandaserver.taskbuffer.TaskBufferInterface import TaskBufferInterface
# taskBuffer
_tbuf = TaskBuffer()
_tbuf.init(panda_config.dbhost, panda_config.dbpasswd, nDBConnection=self.n_dbconn)
# taskBuffer interface for multiprocessing
taskBufferIF = TaskBufferInterface()
taskBufferIF.launch(_tbuf)
self.logger.debug('taskBuffer interface initialized')
self.tbif = taskBufferIF
except Exception as e:
self.logger.error('failed to initialize taskBuffer interface with {err} ; terminated'.format(
err='{0}: {1}'.format(e.__class__.__name__, e)))
raise e
# spawn new workers and put into worker pool
def _spawn_workers(self, n_workers=1, auto_start=False):
for j in range(n_workers):
with self._worker_lock:
worker = DaemonWorker( dem_config=self.dem_config,
msg_queue=self.msg_queue,
worker_lifetime=self.worker_lifetime,
tbuf=self.tbif.getInterface())
self.worker_pool.add(worker)
if auto_start:
worker.start()
# remove a worker from pool
def _remove_worker(self, worker):
with self._worker_lock:
self.worker_pool.discard(worker)
# parse daemon config
def _parse_config(self):
try:
config_json = daemon_config.config
config_dict = json.loads(config_json)
self.dem_config = copy.deepcopy(config_dict)
# loop over daemons
for dem_name, attrs in config_dict.items():
# remove disabled daemons
if 'enable' in attrs and attrs['enable'] is False:
del self.dem_config[dem_name]
continue
# handle option attributes
if 'module' not in attrs:
self.dem_config[dem_name]['module'] = dem_name
if 'arguments' not in attrs:
self.dem_config[dem_name]['arguments'] = []
if 'sync' not in attrs:
self.dem_config[dem_name]['sync'] = False
if 'loop' not in attrs:
self.dem_config[dem_name]['loop'] = False
# check mandatory attributes
the_attrs = copy.deepcopy(self.dem_config[dem_name])
for attr, attr_type in MANDATORY_ATTRS:
if attr not in the_attrs:
self.logger.warning('daemon config missing attribute "{attr}" for {dem} ; skipped'.format(
attr=attr, dem=dem_name))
del self.dem_config[dem_name]
break
elif not isinstance(the_attrs[attr], attr_type):
self.logger.warning('daemon config has invalid type of attribute "{attr}" for {dem} (type must be {typ}) ; skipped'.format(
attr=attr, dem=dem_name, typ=attr_type.__name__))
del self.dem_config[dem_name]
break
except Exception as e:
tb = traceback.format_exc()
self.logger.error('failed to parse daemon config, {err}'.format(
err='{0}: {1}\n{2}\n'.format(e.__class__.__name__, e, tb)))
# make daemon run status map
def _make_dem_run_map(self):
dem_run_map = {}
for dem in self.dem_config:
attrs = {}
attrs['last_run_start_ts'] = 0
attrs['last_warn_ts'] = 0
attrs['msg_ongoing'] = False
dem_run_map[dem] = attrs
self.dem_run_map = dem_run_map
# one scheduler cycle
def _scheduler_cycle(self):
now_ts = int(time.time())
# check last run time from pipes
for worker in list(self.worker_pool):
# remove dead worker from worker pool
if not worker.is_alive():
self._remove_worker(worker)
# lock daemon run status
with self._status_lock:
# get message from the worker
while worker.parent_conn.poll():
dem_name, has_run, last_run_start_ts, last_run_end_ts = worker.parent_conn.recv()
# update run status map
dem_run_attrs = self.dem_run_map[dem_name]
old_last_run_start_ts = dem_run_attrs['last_run_start_ts']
if last_run_start_ts > old_last_run_start_ts:
# take latest timestamp of run start
dem_run_attrs['last_run_start_ts'] = last_run_start_ts
if has_run and last_run_end_ts >= last_run_start_ts:
run_duration = last_run_end_ts - last_run_start_ts
run_period = self.dem_config[dem_name].get('period')
is_loop = self.dem_config[dem_name].get('loop')
if run_duration > run_period and not is_loop:
# warning since daemon run duration longer than daemon period (non-looping)
self.logger.warning('daemon {dem} took {dur} sec , longer than its period {period} sec'.format(
dem=dem_name, dur=run_duration, period=run_period))
dem_run_attrs['msg_ongoing'] = False
# send message to workers
for dem_name, attrs in self.dem_config.items():
run_period = attrs.get('period')
dem_run_attrs = self.dem_run_map[dem_name]
last_run_start_ts = dem_run_attrs['last_run_start_ts']
last_warn_ts = dem_run_attrs['last_warn_ts']
if run_period is None or last_run_start_ts is None:
continue
if last_run_start_ts + run_period <= now_ts:
# time to send new message to run the daemon
with self._status_lock:
dem_run_attrs = self.dem_run_map[dem_name]
msg_ongoing = dem_run_attrs['msg_ongoing']
if msg_ongoing:
# old message not processed yet, maybe daemon still running, skip
run_delay = now_ts - (last_run_start_ts + run_period)
warn_since_ago = now_ts - last_warn_ts
if last_run_start_ts > 0 \
and run_delay > max(300, run_period//2) \
and warn_since_ago > 900:
# make warning if delay too much
self.logger.warning('{dem} delayed to run for {delay} sec '.format(
dem=dem_name, delay=run_delay))
dem_run_attrs['last_warn_ts'] = now_ts
else:
# old message processed, send new message
self.msg_queue.put(dem_name)
self.logger.debug('scheduled to run {dem}'.format(
dem=dem_name))
dem_run_attrs['msg_ongoing'] = True
# dem_run_attrs['last_run_start_ts'] = now_ts
# spawn new workers if there are fewer than n_workers
now_n_workers = len(self.worker_pool)
if now_n_workers < self.n_workers:
n_up = self.n_workers - now_n_workers
self._spawn_workers(n_workers=n_up, auto_start=True)
# sleep
time.sleep(0.5)
# send stop command to all worker processes
def _stop_proc(self):
for worker in self.worker_pool:
worker.parent_conn.send(CMD_STOP)
# stop master
def stop(self):
self.logger.info('daemon master got stop')
# stop scheduler from sending more message
self.to_stop_scheduler = True
# send stop command to workers
self._stop_proc()
# stop taskBuffer interface
self.tbif.stop()
# wait a bit
time.sleep(2.5)
# run
def run(self):
# pid
pid = os.getpid()
self.logger.info('daemon master started ; pid={pid}'.format(pid=pid))
# start daemon workers
for worker in self.worker_pool:
worker.start()
self.logger.debug('daemon master launched all worker processes')
# loop of scheduler
while not self.to_stop_scheduler:
self._scheduler_cycle()
# end
self.logger.info('daemon master ended')
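# Minimal launch sketch, assuming panda_config/daemon_config are already set up
# in the environment; the logger name, worker counts and the signal wiring
# below are illustrative, not the production launcher.
if __name__ == '__main__':
    import logging
    logging.basicConfig(level=logging.DEBUG)
    _master = DaemonMaster(logger=logging.getLogger('daemon_master'),
                           n_workers=2, n_dbconn=1, worker_lifetime=3600)
    for _sig in END_SIGNALS:
        signal.signal(_sig, lambda sig, frame: _master.stop())
    _master.run()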
|
plugin.py
|
import base64
import re
import threading
from binascii import hexlify, unhexlify
from functools import partial
from electrum.bitcoin import (bc_address_to_hash_160, xpub_from_pubkey,
public_key_to_p2pkh, EncodeBase58Check,
TYPE_ADDRESS, TYPE_SCRIPT,
TESTNET, ADDRTYPE_P2PKH, ADDRTYPE_P2SH)
from electrum.i18n import _
from electrum.plugins import BasePlugin, hook
from electrum.transaction import deserialize, Transaction
from electrum.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from ..hw_wallet import HW_PluginBase
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class TrezorCompatibleKeyStore(Hardware_KeyStore):
def get_derivation(self):
return self.derivation
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise RuntimeError(_('Electrum and %s encryption and decryption are currently incompatible') % self.device)
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
payload = base64.b64decode(message)
nonce, message, msg_hmac = payload[:33], payload[33:-8], payload[-8:]
result = client.decrypt_message(address_n, nonce, message, msg_hmac)
return result.message
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class TrezorCompatiblePlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, ckd_public, types, HidTransport
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
self.main_thread = threading.current_thread()
# FIXME: move to base class when Ledger is fixed
if self.libraries_available:
self.device_manager().register_devices(self.DEVICE_IDS)
def _try_hid(self, device):
self.print_error("Trying to connect over USB...")
if device.interface_number == 1:
pair = [None, device.path]
else:
pair = [device.path, None]
try:
return self.hid_transport(pair)
except BaseException as e:
# see fdb810ba622dc7dbe1259cbafb5b28e19d2ab114
# raise
self.print_error("cannot connect at", device.path, str(e))
return None
def _try_bridge(self, device):
self.print_error("Trying to connect over Trezor Bridge...")
try:
return self.bridge_transport({'path': hexlify(device.path)})
except BaseException as e:
self.print_error("cannot connect to bridge", str(e))
return None
def create_client(self, device, handler):
# disable bridge because it seems to never return if a KeepKey is plugged in
#transport = self._try_bridge(device) or self._try_hid(device)
transport = self._try_hid(device)
if not transport:
self.print_error("cannot connect to device")
return
self.print_error("connected to device at", device.path)
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.print_error("ping failed", str(e))
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated %s firmware for device labelled %s. Please '
'download the updated firmware from %s') %
(self.device, client.label(), self.firmware_URL))
self.print_error(msg)
handler.show_error(msg)
return None
return client
def get_client(self, keystore, force_pair=True):
# All client interaction should not be in the main GUI thread
assert self.main_thread != threading.current_thread()
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if TESTNET else "Bitcoin"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your %s.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your %s, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
) % (self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, self.device)
t = threading.Thread(target = self._initialize_device, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
wizard.loop.exec_()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
if method == TIM_RECOVER and self.device == 'TREZOR':
# Warn user about firmware lameness
handler.show_error(_(
"You will be asked to enter 24 words regardless of your "
"seed's actual length. If you enter a word incorrectly or "
"misspell it, you cannot change it or go back - you will need "
"to start again from the beginning.\n\nSo please enter "
"the words carefully!"))
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
u2f_counter = 0
skip_backup = False
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language,
u2f_counter, skip_backup)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
wizard.loop.exit(0)
def setup_device(self, device_info, wizard):
'''Called when creating a new wallet. Select the device to use. If
the device is uninitialized, go through the initialization
process.'''
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m')
client.used()
def get_xpub(self, device_id, derivation, wizard):
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation)
client.used()
return xpub
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
self.prev_tx = prev_tx
self.xpub_path = xpub_path
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, True)
outputs = self.tx_outputs(keystore.get_derivation(), tx)
signed_tx = client.sign_tx(self.get_coin_name(), inputs, outputs, lock_time=tx.locktime)[1]
raw = signed_tx.encode('hex')
tx.update_signatures(raw)
def show_address(self, wallet, address):
client = self.get_client(wallet.keystore)
if not client.atleast_version(1, 3):
wallet.keystore.handler.show_error(_("Your device firmware is too old"))
return
change, index = wallet.get_address_index(address)
derivation = wallet.keystore.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
address_n = client.expand_path(address_path)
client.get_address(self.get_coin_name(), address_n, True)
def tx_inputs(self, tx, for_sig=False):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
prev_hash = "\0"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
else:
def f(x_pubkey):
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
else:
xpub = xpub_from_pubkey(0, x_pubkey.decode('hex'))
s = []
node = self.ckd_public.deserialize(xpub)
return self.types.HDNodePathType(node=node, address_n=s)
pubkeys = map(f, x_pubkeys)
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=map(lambda x: x.decode('hex')[:-1] if x else '', txin.get('signatures')),
m=txin.get('num_sig'),
)
txinputtype = self.types.TxInputType(
script_type=self.types.SPENDMULTISIG,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if 'scriptSig' in txin:
script_sig = txin['scriptSig'].decode('hex')
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def tx_outputs(self, derivation, tx):
outputs = []
has_change = False
for _type, address, amount in tx.outputs():
info = tx.output_info.get(address)
if info is not None and not has_change:
has_change = True # no more than one change address
addrtype, hash_160 = bc_address_to_hash_160(address)
index, xpubs, m = info
if addrtype == ADDRTYPE_P2PKH:
address_n = self.client_class.expand_path(derivation + "/%d/%d"%index)
txoutputtype = self.types.TxOutputType(
amount = amount,
script_type = self.types.PAYTOADDRESS,
address_n = address_n,
)
elif addrtype == ADDRTYPE_P2SH:
address_n = self.client_class.expand_path("/%d/%d"%index)
nodes = map(self.ckd_public.deserialize, xpubs)
pubkeys = [ self.types.HDNodePathType(node=node, address_n=address_n) for node in nodes]
multisig = self.types.MultisigRedeemScriptType(
pubkeys = pubkeys,
signatures = [b''] * len(pubkeys),
m = m)
txoutputtype = self.types.TxOutputType(
multisig = multisig,
amount = amount,
address_n = self.client_class.expand_path(derivation + "/%d/%d"%index),
script_type = self.types.PAYTOMULTISIG)
else:
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = self.types.PAYTOOPRETURN
txoutputtype.op_return_data = address[2:]
elif _type == TYPE_ADDRESS:
addrtype, hash_160 = bc_address_to_hash_160(address)
if addrtype == ADDRTYPE_P2PKH:
txoutputtype.script_type = self.types.PAYTOADDRESS
elif addrtype == ADDRTYPE_P2SH:
txoutputtype.script_type = self.types.PAYTOSCRIPTHASH
else:
raise BaseException('addrtype')
txoutputtype.address = address
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx):
t = self.types.TransactionType()
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
inputs = self.tx_inputs(tx)
t.inputs.extend(inputs)
for vout in d['outputs']:
o = t.bin_outputs.add()
o.amount = vout['value']
o.script_pubkey = vout['scriptPubKey'].decode('hex')
return t
# This function is called from the trezor libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
|
test_sender.py
|
from __future__ import print_function
import os
import pytest
import six
from six.moves import queue
import threading
import time
import shutil
import sys
import wandb
from wandb.util import mkdir_exists_ok
# TODO: consolidate dynamic imports
PY3 = sys.version_info.major == 3 and sys.version_info.minor >= 6
if PY3:
from wandb.sdk.internal.handler import HandleManager
from wandb.sdk.internal.sender import SendManager
from wandb.sdk.interface.interface import BackendSender
else:
from wandb.sdk_py27.internal.handler import HandleManager
from wandb.sdk_py27.internal.sender import SendManager
from wandb.sdk_py27.interface.interface import BackendSender
from wandb.proto import wandb_internal_pb2
from wandb.proto import wandb_internal_pb2 as pb
from .utils import first_filestream
def test_send_status_request_stopped(mock_server, internal_sender, start_backend):
mock_server.ctx["stopped"] = True
start_backend()
status_resp = internal_sender.communicate_stop_status()
assert status_resp is not None
assert status_resp.run_should_stop
def test_parallel_requests(mock_server, internal_sender, start_backend):
mock_server.ctx["stopped"] = True
work_queue = queue.Queue()
start_backend()
def send_sync_request(i):
work_queue.get()
if i % 3 == 0:
status_resp = internal_sender.communicate_stop_status()
assert status_resp is not None
assert status_resp.run_should_stop
elif i % 3 == 2:
summary_resp = internal_sender.communicate_summary()
assert summary_resp is not None
assert hasattr(summary_resp, "item")
work_queue.task_done()
for i in range(10):
work_queue.put(None)
t = threading.Thread(target=send_sync_request, args=(i,))
t.daemon = True
t.start()
work_queue.join()
def test_send_status_request_network(mock_server, internal_sender, start_backend):
mock_server.ctx["rate_limited_times"] = 3
start_backend()
internal_sender.publish_files({"files": [("test.txt", "live")]})
status_resp = internal_sender.communicate_network_status()
assert status_resp is not None
assert len(status_resp.network_responses) > 0
assert status_resp.network_responses[0].http_status_code == 429
def test_resume_success(
mocked_run, test_settings, mock_server, internal_sender, start_backend
):
test_settings.resume = "allow"
mock_server.ctx["resume"] = True
start_backend(initial_run=False)
run_result = internal_sender.communicate_run(mocked_run)
assert run_result.HasField("error") is False
assert run_result.run.starting_step == 16
def test_resume_error_never(
mocked_run, test_settings, mock_server, internal_sender, start_backend
):
test_settings.resume = "never"
mock_server.ctx["resume"] = True
start_backend(initial_run=False)
run_result = internal_sender.communicate_run(mocked_run)
assert run_result.HasField("error")
assert (
run_result.error.message == "resume='never' but run (%s) exists" % mocked_run.id
)
def test_resume_error_must(
mocked_run, test_settings, mock_server, internal_sender, start_backend
):
test_settings.resume = "must"
mock_server.ctx["resume"] = False
start_backend(initial_run=False)
run_result = internal_sender.communicate_run(mocked_run)
assert run_result.HasField("error")
assert (
run_result.error.message
== "resume='must' but run (%s) doesn't exist" % mocked_run.id
)
def test_save_live_existing_file(
mocked_run, mock_server, internal_sender, start_backend, stop_backend
):
start_backend()
with open(os.path.join(mocked_run.dir, "test.txt"), "w") as f:
f.write("TEST TEST")
internal_sender.publish_files({"files": [("test.txt", "live")]})
stop_backend()
assert len(mock_server.ctx["storage?file=test.txt"]) == 1
def test_save_live_write_after_policy(
mocked_run, mock_server, internal_sender, start_backend, stop_backend
):
start_backend()
internal_sender.publish_files({"files": [("test.txt", "live")]})
with open(os.path.join(mocked_run.dir, "test.txt"), "w") as f:
f.write("TEST TEST")
stop_backend()
assert len(mock_server.ctx["storage?file=test.txt"]) == 1
def test_preempting_sent_to_server(
mocked_run, mock_server, internal_sender, start_backend, stop_backend
):
start_backend()
internal_sender.publish_preempting()
stop_backend()
assert any(
[
"preempting" in request_dict
for request_dict in mock_server.ctx["file_stream"]
]
)
def test_save_live_multi_write(
mocked_run, mock_server, internal_sender, start_backend, stop_backend
):
start_backend()
internal_sender.publish_files({"files": [("test.txt", "live")]})
test_file = os.path.join(mocked_run.dir, "test.txt")
with open(test_file, "w") as f:
f.write("TEST TEST")
# File system polling happens every second
time.sleep(1.5)
with open(test_file, "w") as f:
f.write("TEST TEST TEST TEST")
stop_backend()
assert len(mock_server.ctx["storage?file=test.txt"]) == 2
def test_save_live_glob_multi_write(
mocked_run, mock_server, internal_sender, start_backend, stop_backend, mocker
):
def mock_min_size(self, size):
return 1
mocker.patch("wandb.filesync.dir_watcher.PolicyLive.RATE_LIMIT_SECONDS", 1)
mocker.patch(
"wandb.filesync.dir_watcher.PolicyLive.min_wait_for_size", mock_min_size
)
start_backend()
internal_sender.publish_files({"files": [("checkpoints/*", "live")]})
mkdir_exists_ok(os.path.join(mocked_run.dir, "checkpoints"))
test_file_1 = os.path.join(mocked_run.dir, "checkpoints", "test_1.txt")
test_file_2 = os.path.join(mocked_run.dir, "checkpoints", "test_2.txt")
    # To debug this test, add some prints to the dir_watcher.py _on_file_* handlers
print("Wrote file 1")
with open(test_file_1, "w") as f:
f.write("TEST TEST")
time.sleep(2)
print("Wrote file 1 2nd time")
with open(test_file_1, "w") as f:
f.write("TEST TEST TEST TEST")
# File system polling happens every second
time.sleep(1.5)
print("Wrote file 2")
with open(test_file_2, "w") as f:
f.write("TEST TEST TEST TEST")
print("Wrote file 1 3rd time")
with open(test_file_1, "w") as f:
f.write("TEST TEST TEST TEST TEST TEST")
print("Stopping backend")
stop_backend()
print("Backend stopped")
print(
"CTX:", [(k, v) for k, v in mock_server.ctx.items() if k.startswith("storage")]
)
assert len(mock_server.ctx["storage?file=checkpoints/test_1.txt"]) == 3
assert len(mock_server.ctx["storage?file=checkpoints/test_2.txt"]) == 1
def test_save_rename_file(
mocked_run, mock_server, internal_sender, start_backend, stop_backend
):
start_backend()
internal_sender.publish_files({"files": [("test.txt", "live")]})
test_file = os.path.join(mocked_run.dir, "test.txt")
with open(test_file, "w") as f:
f.write("TEST TEST")
# File system polling happens every second
time.sleep(1.5)
shutil.copy(test_file, test_file.replace("test.txt", "test-copy.txt"))
stop_backend()
assert len(mock_server.ctx["storage?file=test.txt"]) == 1
assert len(mock_server.ctx["storage?file=test-copy.txt"]) == 1
def test_save_end_write_after_policy(
mocked_run, mock_server, internal_sender, start_backend, stop_backend
):
start_backend()
internal_sender.publish_files({"files": [("test.txt", "end")]})
with open(os.path.join(mocked_run.dir, "test.txt"), "w") as f:
f.write("TEST TEST")
stop_backend()
assert len(mock_server.ctx["storage?file=test.txt"]) == 1
def test_save_end_existing_file(
mocked_run, mock_server, internal_sender, start_backend, stop_backend
):
start_backend()
with open(os.path.join(mocked_run.dir, "test.txt"), "w") as f:
f.write("TEST TEST")
internal_sender.publish_files({"files": [("test.txt", "end")]})
stop_backend()
assert len(mock_server.ctx["storage?file=test.txt"]) == 1
def test_save_end_multi_write(
mocked_run, mock_server, internal_sender, start_backend, stop_backend
):
start_backend()
internal_sender.publish_files({"files": [("test.txt", "end")]})
test_file = os.path.join(mocked_run.dir, "test.txt")
with open(test_file, "w") as f:
f.write("TEST TEST")
# File system polling happens every second
time.sleep(1.5)
with open(test_file, "w") as f:
f.write("TEST TEST TEST TEST")
stop_backend()
assert len(mock_server.ctx["storage?file=test.txt"]) == 1
def test_save_now_write_after_policy(
mocked_run, mock_server, internal_sender, start_backend, stop_backend
):
start_backend()
internal_sender.publish_files({"files": [("test.txt", "now")]})
with open(os.path.join(mocked_run.dir, "test.txt"), "w") as f:
f.write("TEST TEST")
stop_backend()
assert len(mock_server.ctx["storage?file=test.txt"]) == 1
def test_save_now_existing_file(
mocked_run, mock_server, internal_sender, start_backend, stop_backend
):
start_backend()
with open(os.path.join(mocked_run.dir, "test.txt"), "w") as f:
f.write("TEST TEST")
internal_sender.publish_files({"files": [("test.txt", "now")]})
stop_backend()
assert len(mock_server.ctx["storage?file=test.txt"]) == 1
def test_save_now_multi_write(
mocked_run, mock_server, internal_sender, start_backend, stop_backend
):
start_backend()
internal_sender.publish_files({"files": [("test.txt", "now")]})
test_file = os.path.join(mocked_run.dir, "test.txt")
with open(test_file, "w") as f:
f.write("TEST TEST")
# File system polling happens every second
time.sleep(1.5)
with open(test_file, "w") as f:
f.write("TEST TEST TEST TEST")
stop_backend()
assert len(mock_server.ctx["storage?file=test.txt"]) == 1
def test_save_glob_multi_write(
mocked_run, mock_server, internal_sender, start_backend, stop_backend
):
start_backend()
internal_sender.publish_files({"files": [("checkpoints/*", "now")]})
mkdir_exists_ok(os.path.join(mocked_run.dir, "checkpoints"))
test_file_1 = os.path.join(mocked_run.dir, "checkpoints", "test_1.txt")
test_file_2 = os.path.join(mocked_run.dir, "checkpoints", "test_2.txt")
print("Wrote file 1")
with open(test_file_1, "w") as f:
f.write("TEST TEST")
# File system polling happens every second
time.sleep(1.5)
print("Wrote file 2")
with open(test_file_2, "w") as f:
f.write("TEST TEST TEST TEST")
time.sleep(1.5)
print("Stopping backend")
stop_backend()
print("Backend stopped")
print(
"CTX", [(k, v) for k, v in mock_server.ctx.items() if k.startswith("storage")]
)
assert len(mock_server.ctx["storage?file=checkpoints/test_1.txt"]) == 1
assert len(mock_server.ctx["storage?file=checkpoints/test_2.txt"]) == 1
def test_save_now_relative_path(
mocked_run, mock_server, internal_sender, start_backend, stop_backend
):
start_backend()
internal_sender.publish_files({"files": [("foo/test.txt", "now")]})
test_file = os.path.join(mocked_run.dir, "foo", "test.txt")
mkdir_exists_ok(os.path.dirname(test_file))
with open(test_file, "w") as f:
f.write("TEST TEST")
stop_backend()
print("DAMN DUDE", mock_server.ctx)
assert len(mock_server.ctx["storage?file=foo/test.txt"]) == 1
def test_save_now_twice(
mocked_run, mock_server, internal_sender, start_backend, stop_backend
):
start_backend()
file_path = os.path.join("foo", "test.txt")
internal_sender.publish_files({"files": [(file_path, "now")]})
test_file = os.path.join(mocked_run.dir, file_path)
mkdir_exists_ok(os.path.dirname(test_file))
with open(test_file, "w") as f:
f.write("TEST TEST")
time.sleep(1.5)
with open(test_file, "w") as f:
f.write("TEST TEST TEST TEST")
internal_sender.publish_files({"files": [(file_path, "now")]})
stop_backend()
print("DAMN DUDE", mock_server.ctx)
assert len(mock_server.ctx["storage?file=foo/test.txt"]) == 2
def test_output(mocked_run, mock_server, internal_sender, start_backend, stop_backend):
start_backend()
for i in range(100):
internal_sender.publish_output("stdout", "\rSome recurring line")
internal_sender.publish_output("stdout", "\rFinal line baby\n")
stop_backend()
print("DUDE!", mock_server.ctx)
stream = first_filestream(mock_server.ctx)
assert "Final line baby" in stream["files"]["output.log"]["content"][0]
def test_sync_spell_run(
mocked_run, mock_server, internal_sender, start_backend, stop_backend, parse_ctx
):
try:
os.environ["SPELL_RUN_URL"] = "https://spell.run/foo"
start_backend()
stop_backend()
print("CTX", mock_server.ctx)
ctx = parse_ctx(mock_server.ctx)
assert ctx.config["_wandb"]["value"]["spell_url"] == "https://spell.run/foo"
# Check that we pinged spells API
assert mock_server.ctx["spell_data"] == {
"access_token": None,
"url": "{}/mock_server_entity/test/runs/{}".format(
mocked_run._settings.base_url, mocked_run.id
),
}
finally:
del os.environ["SPELL_RUN_URL"]
def test_upgrade_upgraded(
mocked_run,
mock_server,
internal_sender,
start_backend,
stop_backend,
restore_version,
):
wandb.__version__ = "0.0.6"
wandb.__hack_pypi_latest_version__ = "0.0.8"
start_backend(initial_run=False)
ret = internal_sender.communicate_check_version()
assert ret
assert (
ret.upgrade_message
== "wandb version 0.0.8 is available! To upgrade, please run:\n $ pip install wandb --upgrade"
)
assert not ret.delete_message
assert not ret.yank_message
def test_upgrade_yanked(
mocked_run,
mock_server,
internal_sender,
start_backend,
stop_backend,
restore_version,
):
wandb.__version__ = "0.0.2"
wandb.__hack_pypi_latest_version__ = "0.0.8"
start_backend(initial_run=False)
ret = internal_sender.communicate_check_version()
assert ret
assert (
ret.upgrade_message
== "wandb version 0.0.8 is available! To upgrade, please run:\n $ pip install wandb --upgrade"
)
assert not ret.delete_message
assert ret.yank_message == "wandb version 0.0.2 has been recalled! Please upgrade."
def test_upgrade_yanked_message(
mocked_run,
mock_server,
internal_sender,
start_backend,
stop_backend,
restore_version,
):
wandb.__version__ = "0.0.3"
wandb.__hack_pypi_latest_version__ = "0.0.8"
start_backend(initial_run=False)
ret = internal_sender.communicate_check_version()
assert ret
assert (
ret.upgrade_message
== "wandb version 0.0.8 is available! To upgrade, please run:\n $ pip install wandb --upgrade"
)
assert not ret.delete_message
assert (
ret.yank_message
== "wandb version 0.0.3 has been recalled! (just cuz) Please upgrade."
)
def test_upgrade_removed(
mocked_run,
mock_server,
internal_sender,
start_backend,
stop_backend,
restore_version,
):
wandb.__version__ = "0.0.4"
wandb.__hack_pypi_latest_version__ = "0.0.8"
start_backend(initial_run=False)
ret = internal_sender.communicate_check_version()
assert ret
assert (
ret.upgrade_message
== "wandb version 0.0.8 is available! To upgrade, please run:\n $ pip install wandb --upgrade"
)
assert (
ret.delete_message == "wandb version 0.0.4 has been retired! Please upgrade."
)
assert not ret.yank_message
# TODO: test other sender methods
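# A possible additional test, sketched from the fixtures and calls already used
# above (the test name is hypothetical; it only reuses communicate_summary and the
# standard mock_server/internal_sender/start_backend fixtures).
def test_send_summary_request(mock_server, internal_sender, start_backend):
    start_backend()
    summary_resp = internal_sender.communicate_summary()
    assert summary_resp is not None
    assert hasattr(summary_resp, "item")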
|
GUI.py
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# File name : client.py
# Description : client
# Website : www.adeept.com
# E-mail : support@adeept.com
# Author : William
# Date : 2018/08/22
#
import cv2
import zmq
import base64
import numpy as np
from socket import *
import sys
import time
import threading as thread
import tkinter as tk
ip_stu=1 #Shows connection status
c_f_stu = 0
c_b_stu = 0
c_l_stu = 0
c_r_stu = 0
c_ls_stu= 0
c_rs_stu= 0
funcMode= 0
tcpClicSock = ''
root = ''
stat = 0
ultra_data = 'Ultrasonic OFF'
########>>>>>VIDEO<<<<<########
def video_thread():
global footage_socket, font, frame_num, fps
context = zmq.Context()
footage_socket = context.socket(zmq.SUB)
footage_socket.bind('tcp://*:5555')
    footage_socket.setsockopt_string(zmq.SUBSCRIBE, '') #np.unicode is removed in newer NumPy; a plain str works here
font = cv2.FONT_HERSHEY_SIMPLEX
frame_num = 0
fps = 0
def get_FPS():
global frame_num, fps
while 1:
try:
time.sleep(1)
fps = frame_num
frame_num = 0
except:
time.sleep(1)
def opencv_r():
global frame_num
while True:
try:
frame = footage_socket.recv_string()
img = base64.b64decode(frame)
npimg = np.frombuffer(img, dtype=np.uint8)
source = cv2.imdecode(npimg, 1)
cv2.putText(source,('PC FPS: %s'%fps),(40,20), font, 0.5,(255,255,255),1,cv2.LINE_AA)
try:
cv2.putText(source,('CPU Temperature: %s'%CPU_TEP),(370,350), font, 0.5,(128,255,128),1,cv2.LINE_AA)
cv2.putText(source,('CPU Usage: %s'%CPU_USE),(370,380), font, 0.5,(128,255,128),1,cv2.LINE_AA)
cv2.putText(source,('RAM Usage: %s'%RAM_USE),(370,410), font, 0.5,(128,255,128),1,cv2.LINE_AA)
if ultrasonicMode == 1:
cv2.line(source,(320,240),(260,300),(255,255,255),1)
cv2.line(source,(210,300),(260,300),(255,255,255),1)
cv2.putText(source,('%sm'%ultra_data),(210,290), font, 0.5,(255,255,255),1,cv2.LINE_AA)
except:
pass
#cv2.putText(source,('%sm'%ultra_data),(210,290), font, 0.5,(255,255,255),1,cv2.LINE_AA)
cv2.imshow("Stream", source)
frame_num += 1
cv2.waitKey(1)
except:
time.sleep(0.5)
break
fps_threading=thread.Thread(target=get_FPS) #Define a thread for counting the FPS
fps_threading.setDaemon(True) #Daemon thread: it closes automatically when the mainloop() exits
fps_threading.start() #Thread starts
video_threading=thread.Thread(target=video_thread) #Define a thread for setting up the FPV video socket
video_threading.setDaemon(True) #Daemon thread: it closes automatically when the mainloop() exits
video_threading.start() #Thread starts
########>>>>>VIDEO<<<<<########
def replace_num(initial,new_num): #Call this function to replace data in '.txt' file
newline=""
str_num=str(new_num)
with open("ip.txt","r") as f:
for line in f.readlines():
if(line.find(initial) == 0):
line = initial+"%s" %(str_num)
newline += line
with open("ip.txt","w") as f:
        f.writelines(newline) #Write the updated content back to the file
def num_import(initial): #Call this function to import data from '.txt' file
with open("ip.txt") as f:
for line in f.readlines():
if(line.find(initial) == 0):
r=line
begin=len(list(initial))
snum=r[begin:]
n=snum
return n
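#Assumed layout of 'ip.txt' (illustrative only, the file ships separately):
#each line starts with a key such as 'IP:' followed by the stored value, e.g.
#   IP:192.168.3.11
#replace_num('IP:', value) rewrites that line and num_import('IP:') reads it back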
def call_forward(event): #When this function is called,client commands the car to move forward
global c_f_stu
if c_f_stu == 0:
tcpClicSock.send(('forward').encode())
c_f_stu=1
def call_back(event): #When this function is called,client commands the car to move backward
global c_b_stu
if c_b_stu == 0:
tcpClicSock.send(('backward').encode())
c_b_stu=1
def call_FB_stop(event): #When this function is called,client commands the car to stop moving
global c_f_stu,c_b_stu,c_l_stu,c_r_stu,c_ls_stu,c_rs_stu
c_f_stu=0
c_b_stu=0
tcpClicSock.send(('DS').encode())
def call_Turn_stop(event): #When this function is called,client commands the car to stop moving
global c_f_stu,c_b_stu,c_l_stu,c_r_stu,c_ls_stu,c_rs_stu
c_l_stu=0
c_r_stu=0
c_ls_stu=0
c_rs_stu=0
tcpClicSock.send(('TS').encode())
def call_Left(event): #When this function is called,client commands the car to turn left
global c_l_stu
if c_l_stu == 0:
tcpClicSock.send(('left').encode())
c_l_stu=1
def call_Right(event): #When this function is called,client commands the car to turn right
global c_r_stu
if c_r_stu == 0:
tcpClicSock.send(('right').encode())
c_r_stu=1
def call_LeftSide(event):
tcpClicSock.send(('out').encode())
def call_RightSide(event):
tcpClicSock.send(('in').encode())
def call_CLeft(event):
tcpClicSock.send(('c_left').encode())
def call_CRight(event):
tcpClicSock.send(('c_right').encode())
def call_headup(event):
tcpClicSock.send(('headup').encode())
def call_headdown(event):
tcpClicSock.send(('headdown').encode())
def call_headleft(event):
tcpClicSock.send(('catch').encode())
def call_headright(event):
tcpClicSock.send(('loose').encode())
def call_headhome(event):
tcpClicSock.send(('headhome').encode())
def call_steady(event):
global ultrasonicMode
if funcMode == 0:
tcpClicSock.send(('steady').encode())
ultrasonicMode = 1
else:
tcpClicSock.send(('funEnd').encode())
def call_FindColor(event):
if funcMode == 0:
tcpClicSock.send(('FindColor').encode())
else:
tcpClicSock.send(('funEnd').encode())
def call_WatchDog(event):
if funcMode == 0:
tcpClicSock.send(('WatchDog').encode())
else:
tcpClicSock.send(('funEnd').encode())
def call_FindLine(event):
if funcMode == 0:
tcpClicSock.send(('FindLine').encode())
else:
tcpClicSock.send(('funEnd').encode())
def all_btn_red():
Btn_Steady.config(bg='#FF6D00', fg='#000000')
Btn_FindColor.config(bg='#FF6D00', fg='#000000')
Btn_WatchDog.config(bg='#FF6D00', fg='#000000')
Btn_Fun4.config(bg='#FF6D00', fg='#000000')
Btn_Fun5.config(bg='#FF6D00', fg='#000000')
Btn_Fun6.config(bg='#FF6D00', fg='#000000')
def all_btn_normal():
Btn_Steady.config(bg=color_btn, fg=color_text)
Btn_FindColor.config(bg=color_btn, fg=color_text)
Btn_WatchDog.config(bg=color_btn, fg=color_text)
Btn_Fun4.config(bg=color_btn, fg=color_text)
Btn_Fun5.config(bg=color_btn, fg=color_text)
Btn_Fun6.config(bg=color_btn, fg=color_text)
def connection_thread():
global funcMode, ultrasonicMode, canvas_rec, canvas_text
while 1:
car_info = (tcpClicSock.recv(BUFSIZ)).decode()
if not car_info:
continue
elif 'FindColor' in car_info:
funcMode = 1
all_btn_red()
Btn_FindColor.config(bg='#00E676')
elif 'steady' in car_info:
funcMode = 1
all_btn_red()
Btn_Steady.config(bg='#00E676')
elif 'WatchDog' in car_info:
funcMode = 1
all_btn_red()
Btn_WatchDog.config(bg='#00E676')
elif 'FindLine' in car_info:
funcMode = 1
all_btn_red()
Btn_Fun4.config(bg='#00E676')
elif 'FunEnd' in car_info:
funcMode = 0
all_btn_normal()
ultrasonicMode = 0
canvas_rec=canvas_ultra.create_rectangle(0,0,352,30,fill = color_btn,width=0)
canvas_text=canvas_ultra.create_text((90,11),text='Ultrasonic OFF',fill=color_text)
def instruction():
instructions = []
while 1:
instruction_1 = 'You can use shortcuts to control the robot'
instructions.append(instruction_1)
instruction_2 = 'W: Forward S: Backward A: Turn left D: Turn right'
instructions.append(instruction_2)
instruction_3 = 'I: Look up K: Look down J: Grab L: Loose'
instructions.append(instruction_3)
instruction_4 = 'Q: Hand reaches out E: Hand takes back U & O: Hand rotation'
instructions.append(instruction_4)
        instruction_5 = 'F (the Home button on GUI): Arm and head return to original position'
instructions.append(instruction_5)
instruction_6 = 'then the PWM of servos will be set to 0'
instructions.append(instruction_6)
instruction_7 = 'for better battery and servo maintenance'
instructions.append(instruction_7)
for ins_show in instructions:
label_ins.config(text=ins_show)
time.sleep(4)
def Info_receive():
global CPU_TEP,CPU_USE,RAM_USE
HOST = ''
    INFO_PORT = 2256 #Define the TCP port for system info
ADDR = (HOST, INFO_PORT)
InfoSock = socket(AF_INET, SOCK_STREAM)
InfoSock.setsockopt(SOL_SOCKET,SO_REUSEADDR,1)
InfoSock.bind(ADDR)
InfoSock.listen(5) #Start server,waiting for client
InfoSock, addr = InfoSock.accept()
print('Info connected')
while 1:
try:
info_data = ''
info_data = str(InfoSock.recv(BUFSIZ).decode())
info_get = info_data.split()
CPU_TEP,CPU_USE,RAM_USE= info_get
#print('cpu_tem:%s\ncpu_use:%s\nram_use:%s'%(CPU_TEP,CPU_USE,RAM_USE))
CPU_TEP_lab.config(text='CPU Temp: %s℃'%CPU_TEP)
CPU_USE_lab.config(text='CPU Usage: %s'%CPU_USE)
RAM_lab.config(text='RAM Usage: %s'%RAM_USE)
except:
pass
def ultra_receive():
global ultra_data, canvas_text, canvas_rec
ultra_HOST = ''
    ultra_PORT = 2257 #Define the TCP port for ultrasonic data
ultra_ADDR = (ultra_HOST, ultra_PORT)
ultra_Sock = socket(AF_INET, SOCK_STREAM)
ultra_Sock.setsockopt(SOL_SOCKET,SO_REUSEADDR,1)
ultra_Sock.bind(ultra_ADDR)
ultra_Sock.listen(5) #Start server,waiting for client
ultra_Sock, addr = ultra_Sock.accept()
canvas_text=canvas_ultra.create_text((90,11),text='Ultrasonic OFF',fill=color_text)
while 1:
try:
ultra_data = str(ultra_Sock.recv(BUFSIZ).decode())
try:
ultra_data = float(ultra_data)
if float(ultra_data) < 3:
#print(ultra_data)
try:
canvas_ultra.delete(canvas_text)
canvas_ultra.delete(canvas_rec)
except:
pass
#canvas_rec=canvas_ultra.create_rectangle(0,0,int(float(ultra_data)/145*3),30,fill = '#FFFFFF')
canvas_rec=canvas_ultra.create_rectangle(0,0,(352-int(float(ultra_data)*352/3)),30,fill = '#448AFF',width=0)
canvas_text=canvas_ultra.create_text((90,11),text='Ultrasonic Output: %sm'%ultra_data,fill=color_text)
#print('xxx')
except:
pass
except:
pass
def socket_connect(): #Call this function to connect with the server
global ADDR,tcpClicSock,BUFSIZ,ip_stu,ipaddr
ip_adr=E1.get() #Get the IP address from Entry
if ip_adr == '': #If no input IP address in Entry,import a default IP
ip_adr=num_import('IP:')
l_ip_4.config(text='Connecting')
l_ip_4.config(bg='#FF8F00')
l_ip_5.config(text='Default:%s'%ip_adr)
pass
SERVER_IP = ip_adr
    SERVER_PORT = 10223 #Define the TCP port for commands
BUFSIZ = 1024 #Define buffer size
ADDR = (SERVER_IP, SERVER_PORT)
tcpClicSock = socket(AF_INET, SOCK_STREAM) #Set connection value for socket
for i in range (1,6): #Try 5 times if disconnected
if ip_stu == 1:
print("Connecting to server @ %s:%d..." %(SERVER_IP, SERVER_PORT))
print("Connecting")
tcpClicSock.connect(ADDR) #Connection with the server
print("Connected")
l_ip_5.config(text='IP:%s'%ip_adr)
l_ip_4.config(text='Connected')
l_ip_4.config(bg='#558B2F')
replace_num('IP:',ip_adr)
E1.config(state='disabled') #Disable the Entry
Btn14.config(state='disabled') #Disable the Entry
ip_stu=0 #'0' means connected
            connection_threading=thread.Thread(target=connection_thread) #Define a thread for receiving mode feedback
            connection_threading.setDaemon(True) #Daemon thread: it closes automatically when the mainloop() exits
            connection_threading.start() #Thread starts
            info_threading=thread.Thread(target=Info_receive) #Define a thread for receiving CPU/RAM info
            info_threading.setDaemon(True) #Daemon thread: it closes automatically when the mainloop() exits
            info_threading.start() #Thread starts
            ultra_threading=thread.Thread(target=ultra_receive) #Define a thread for receiving ultrasonic data
            ultra_threading.setDaemon(True) #Daemon thread: it closes automatically when the mainloop() exits
            ultra_threading.start() #Thread starts
            video_threading=thread.Thread(target=opencv_r) #Define a thread for FPV and OpenCV display
            video_threading.setDaemon(True) #Daemon thread: it closes automatically when the mainloop() exits
            video_threading.start() #Thread starts
break
else:
print("Cannot connecting to server,try it latter!")
l_ip_4.config(text='Try %d/5 time(s)'%i)
l_ip_4.config(bg='#EF6C00')
print('Try %d/5 time(s)'%i)
ip_stu=1
time.sleep(1)
continue
if ip_stu == 1:
l_ip_4.config(text='Disconnected')
l_ip_4.config(bg='#F44336')
def connect(event): #Call this function to connect with the server
if ip_stu == 1:
sc=thread.Thread(target=socket_connect) #Define a thread for connection
        sc.setDaemon(True) #Daemon thread: it closes automatically when the mainloop() exits
sc.start() #Thread starts
def connect_click(): #Call this function to connect with the server
if ip_stu == 1:
sc=thread.Thread(target=socket_connect) #Define a thread for connection
        sc.setDaemon(True) #Daemon thread: it closes automatically when the mainloop() exits
sc.start() #Thread starts
def set_R(event):
time.sleep(0.03)
tcpClicSock.send(('wsR %s'%var_R.get()).encode())
def set_G(event):
time.sleep(0.03)
tcpClicSock.send(('wsG %s'%var_G.get()).encode())
def set_B(event):
time.sleep(0.03)
tcpClicSock.send(('wsB %s'%var_B.get()).encode())
def loop(): #GUI
    global tcpClicSock,root,E1,connect,l_ip_4,l_ip_5,color_btn,color_text,Btn14,CPU_TEP_lab,CPU_USE_lab,RAM_lab,canvas_ultra,color_text,var_R,var_B,var_G,Btn_Steady,Btn_FindColor,Btn_WatchDog,Btn_Fun4,Btn_Fun5,Btn_Fun6,label_ins #These names are declared global so the values set in loop() are visible to the other functions
while True:
color_bg='#000000' #Set background color
color_text='#E1F5FE' #Set text color
color_btn='#0277BD' #Set button color
color_line='#01579B' #Set line color
color_can='#212121' #Set canvas color
color_oval='#2196F3' #Set oval color
target_color='#FF6D00'
root = tk.Tk() #Define a window named root
root.title('Adeept RaspTank') #Main window title
        root.geometry('565x510') #Main window size; the separator is the lowercase letter x
root.config(bg=color_bg) #Set the background color of root window
try:
            logo =tk.PhotoImage(file = 'logo.png') #Load the logo image; tk.PhotoImage only supports '.png' and '.gif'
l_logo=tk.Label(root,image = logo,bg=color_bg) #Set a label to show the logo picture
l_logo.place(x=30,y=13) #Place the Label in a right position
except:
pass
CPU_TEP_lab=tk.Label(root,width=18,text='CPU Temp:',fg=color_text,bg='#212121')
CPU_TEP_lab.place(x=400,y=15) #Define a Label and put it in position
CPU_USE_lab=tk.Label(root,width=18,text='CPU Usage:',fg=color_text,bg='#212121')
CPU_USE_lab.place(x=400,y=45) #Define a Label and put it in position
RAM_lab=tk.Label(root,width=18,text='RAM Usage:',fg=color_text,bg='#212121')
RAM_lab.place(x=400,y=75) #Define a Label and put it in position
l_ip=tk.Label(root,width=18,text='Status',fg=color_text,bg=color_btn)
l_ip.place(x=30,y=110) #Define a Label and put it in position
l_ip_4=tk.Label(root,width=18,text='Disconnected',fg=color_text,bg='#F44336')
l_ip_4.place(x=400,y=110) #Define a Label and put it in position
l_ip_5=tk.Label(root,width=18,text='Use default IP',fg=color_text,bg=color_btn)
l_ip_5.place(x=400,y=145) #Define a Label and put it in position
label_ins=tk.Label(root,width=71,text='Instruction',fg=color_text,bg=color_btn)
label_ins.place(x=30,y=300) #Define a Label and put it in position
E1 = tk.Entry(root,show=None,width=16,bg="#37474F",fg='#eceff1')
E1.place(x=180,y=40) #Define a Entry and put it in position
l_ip_3=tk.Label(root,width=10,text='IP Address:',fg=color_text,bg='#000000')
l_ip_3.place(x=175,y=15) #Define a Label and put it in position
label_openCV=tk.Label(root,width=28,text='OpenCV Status',fg=color_text,bg=color_btn)
label_openCV.place(x=180,y=110) #Define a Label and put it in position
canvas_ultra=tk.Canvas(root,bg=color_btn,height=23,width=352,highlightthickness=0)
canvas_ultra.place(x=30,y=145)
################################
#canvas_rec=canvas_ultra.create_rectangle(0,0,340,30,fill = '#FFFFFF',width=0)
#canvas_text=canvas_ultra.create_text((90,11),text='Ultrasonic Output: 0.75m',fill=color_text)
################################
Btn0 = tk.Button(root, width=8, text='Forward',fg=color_text,bg=color_btn,relief='ridge')
Btn1 = tk.Button(root, width=8, text='Backward',fg=color_text,bg=color_btn,relief='ridge')
Btn2 = tk.Button(root, width=8, text='Left',fg=color_text,bg=color_btn,relief='ridge')
Btn3 = tk.Button(root, width=8, text='Right',fg=color_text,bg=color_btn,relief='ridge')
Btn_LeftSide = tk.Button(root, width=8, text='<--',fg=color_text,bg=color_btn,relief='ridge')
Btn_LeftSide.place(x=30,y=195)
Btn_LeftSide.bind('<ButtonPress-1>', call_LeftSide)
Btn_LeftSide.bind('<ButtonRelease-1>', call_Turn_stop)
Btn_RightSide = tk.Button(root, width=8, text='-->',fg=color_text,bg=color_btn,relief='ridge')
Btn_RightSide.place(x=170,y=195)
Btn_RightSide.bind('<ButtonPress-1>', call_RightSide)
Btn_RightSide.bind('<ButtonRelease-1>', call_Turn_stop)
Btn0.place(x=100,y=195)
Btn1.place(x=100,y=230)
Btn2.place(x=30,y=230)
Btn3.place(x=170,y=230)
Btn0.bind('<ButtonPress-1>', call_forward)
Btn1.bind('<ButtonPress-1>', call_back)
Btn2.bind('<ButtonPress-1>', call_Left)
Btn3.bind('<ButtonPress-1>', call_Right)
Btn0.bind('<ButtonRelease-1>', call_FB_stop)
Btn1.bind('<ButtonRelease-1>', call_FB_stop)
Btn2.bind('<ButtonRelease-1>', call_Turn_stop)
Btn3.bind('<ButtonRelease-1>', call_Turn_stop)
root.bind('<KeyPress-w>', call_forward)
root.bind('<KeyPress-a>', call_Left)
root.bind('<KeyPress-d>', call_Right)
root.bind('<KeyPress-s>', call_back)
root.bind('<KeyPress-q>', call_LeftSide)
root.bind('<KeyPress-e>', call_RightSide)
root.bind('<KeyRelease-q>', call_Turn_stop)
root.bind('<KeyRelease-e>', call_Turn_stop)
root.bind('<KeyRelease-w>', call_FB_stop)
root.bind('<KeyRelease-a>', call_Turn_stop)
root.bind('<KeyRelease-d>', call_Turn_stop)
root.bind('<KeyRelease-s>', call_FB_stop)
Btn_up = tk.Button(root, width=8, text='Up',fg=color_text,bg=color_btn,relief='ridge')
Btn_down = tk.Button(root, width=8, text='Down',fg=color_text,bg=color_btn,relief='ridge')
Btn_left = tk.Button(root, width=8, text='Grab',fg=color_text,bg=color_btn,relief='ridge')
Btn_right = tk.Button(root, width=8, text='Loose',fg=color_text,bg=color_btn,relief='ridge')
Btn_home = tk.Button(root, width=8, text='Home',fg=color_text,bg=color_btn,relief='ridge')
Btn_up.place(x=400,y=195)
Btn_down.place(x=400,y=230)
Btn_left.place(x=330,y=230)
Btn_right.place(x=470,y=230)
Btn_home.place(x=250,y=230)
Btn_Cleft = tk.Button(root, width=8, text='\\',fg=color_text,bg=color_btn,relief='ridge')
Btn_Cright = tk.Button(root, width=8, text='/',fg=color_text,bg=color_btn,relief='ridge')
Btn_Cleft.place(x=330, y=195)
Btn_Cright.place(x=470, y=195)
root.bind('<KeyPress-u>', call_CLeft)
root.bind('<KeyPress-o>', call_CRight)
root.bind('<KeyPress-i>', call_headup)
root.bind('<KeyPress-k>', call_headdown)
root.bind('<KeyPress-j>', call_headleft)
root.bind('<KeyPress-l>', call_headright)
root.bind('<KeyPress-f>', call_headhome)
Btn_Cleft.bind('<ButtonPress-1>', call_CLeft)
Btn_Cright.bind('<ButtonPress-1>', call_CRight)
Btn_up.bind('<ButtonPress-1>', call_headup)
Btn_down.bind('<ButtonPress-1>', call_headdown)
Btn_left.bind('<ButtonPress-1>', call_headleft)
Btn_right.bind('<ButtonPress-1>', call_headright)
Btn_home.bind('<ButtonPress-1>', call_headhome)
Btn14= tk.Button(root, width=8,height=2, text='Connect',fg=color_text,bg=color_btn,command=connect_click,relief='ridge')
Btn14.place(x=315,y=15) #Define a Button and put it in position
root.bind('<Return>', connect)
var_R = tk.StringVar()
var_R.set(0)
Scale_R = tk.Scale(root,label=None,
from_=0,to=255,orient=tk.HORIZONTAL,length=505,
showvalue=1,tickinterval=None,resolution=1,variable=var_R,troughcolor='#F44336',command=set_R,fg=color_text,bg=color_bg,highlightthickness=0)
Scale_R.place(x=30,y=330) #Define a Scale and put it in position
var_G = tk.StringVar()
var_G.set(0)
Scale_G = tk.Scale(root,label=None,
from_=0,to=255,orient=tk.HORIZONTAL,length=505,
showvalue=1,tickinterval=None,resolution=1,variable=var_G,troughcolor='#00E676',command=set_G,fg=color_text,bg=color_bg,highlightthickness=0)
Scale_G.place(x=30,y=360) #Define a Scale and put it in position
var_B = tk.StringVar()
var_B.set(0)
Scale_B = tk.Scale(root,label=None,
from_=0,to=255,orient=tk.HORIZONTAL,length=505,
showvalue=1,tickinterval=None,resolution=1,variable=var_B,troughcolor='#448AFF',command=set_B,fg=color_text,bg=color_bg,highlightthickness=0)
Scale_B.place(x=30,y=390) #Define a Scale and put it in position
canvas_cover=tk.Canvas(root,bg=color_bg,height=30,width=510,highlightthickness=0)
canvas_cover.place(x=30,y=420)
Btn_Steady = tk.Button(root, width=10, text='Ultrasonic',fg=color_text,bg=color_btn,relief='ridge')
Btn_Steady.place(x=30,y=445)
root.bind('<KeyPress-z>', call_steady)
Btn_Steady.bind('<ButtonPress-1>', call_steady)
Btn_FindColor = tk.Button(root, width=10, text='FindColor',fg=color_text,bg=color_btn,relief='ridge')
Btn_FindColor.place(x=115,y=445)
root.bind('<KeyPress-z>', call_FindColor)
Btn_FindColor.bind('<ButtonPress-1>', call_FindColor)
Btn_WatchDog = tk.Button(root, width=10, text='WatchDog',fg=color_text,bg=color_btn,relief='ridge')
Btn_WatchDog.place(x=200,y=445)
root.bind('<KeyPress-z>', call_WatchDog)
Btn_WatchDog.bind('<ButtonPress-1>', call_WatchDog)
Btn_Fun4 = tk.Button(root, width=10, text='FindLine',fg=color_text,bg=color_btn,relief='ridge')
Btn_Fun4.place(x=285,y=445)
root.bind('<KeyPress-z>', call_FindLine)
Btn_Fun4.bind('<ButtonPress-1>', call_FindLine)
Btn_Fun5 = tk.Button(root, width=10, text='Function 5',fg=color_text,bg=color_btn,relief='ridge')
Btn_Fun5.place(x=370,y=445)
root.bind('<KeyPress-z>', call_WatchDog)
Btn_Fun5.bind('<ButtonPress-1>', call_WatchDog)
Btn_Fun6 = tk.Button(root, width=10, text='Function 6',fg=color_text,bg=color_btn,relief='ridge')
Btn_Fun6.place(x=455,y=445)
root.bind('<KeyPress-z>', call_WatchDog)
Btn_Fun6.bind('<ButtonPress-1>', call_WatchDog)
        ins_threading=thread.Thread(target=instruction) #Define a thread for cycling the instruction label
        ins_threading.setDaemon(True) #Daemon thread: it closes automatically when the mainloop() exits
        ins_threading.start() #Thread starts
global stat
if stat==0: # Ensure the mainloop runs only once
root.mainloop() # Run the mainloop()
stat=1 # Change the value to '1' so the mainloop() would not run again.
if __name__ == '__main__':
try:
loop() # Load GUI
except:
tcpClicSock.close() # Close socket or it may not connect with the server again
footage_socket.close()
cv2.destroyAllWindows()
pass
|
protocol_arduinosimulator.py
|
"""Provides a simple simulator for telemetry_board.ino or camera_board.ino.
We use the pragma "no cover" in several places that happen to never be
reached or that would only be reached if the code was called directly,
i.e. not in the way it is intended to be used.
"""
import copy
import datetime
import queue
import random
import threading
import time
import urllib.parse
import panoptes.utils.serial.handlers.protocol_no_op
from loguru import logger
from panoptes.utils.serializers import from_json
from panoptes.utils.serializers import to_json
from serial import serialutil
from serial.serialutil import PortNotOpenError
def _drain_queue(q):
cmd = None
while not q.empty():
cmd = q.get_nowait()
return cmd # Present just for debugging.
class ArduinoSimulator:
"""Simulates the serial behavior of the PANOPTES Arduino sketches.
The RS-232 connection is simulated with an input and output queue of bytes. This class provides
a run function which can be called from a Thread to execute. Every two seconds while running it
will generate another json output line, and then send that to the json_queue in small chunks
at a rate similar to 9600 baud, the rate used by our Arduino sketches.
"""
def __init__(self, message, relay_queue, json_queue, chunk_size, stop):
"""
Args:
message: The message to be sent (millis and report_num will be added).
relay_queue: The queue.Queue instance from which relay command
bytes are read and acted upon. Elements are of type bytes.
json_queue: The queue.Queue instance to which json messages
(serialized to bytes) are written at ~9600 baud. Elements
are of type bytes (i.e. each element is a sequence of bytes of
length up to chunk_size).
chunk_size: The number of bytes to write to json_queue at a time.
stop: a threading.Event which is checked to see if run should stop executing.
"""
self.message = copy.deepcopy(message)
self.logger = logger
self.logger.critical(f'message: {message}')
self.relay_queue = relay_queue
self.json_queue = json_queue
self.stop = stop
# Time between producing messages.
self.message_delta = datetime.timedelta(seconds=2)
self.next_message_time = None
# Size of a chunk of bytes.
self.chunk_size = chunk_size
        # Interval between outputting chunks of bytes (~1000 bytes/second, approximating 9600 baud).
chunks_per_second = 1000.0 / self.chunk_size
chunk_interval = 1.0 / chunks_per_second
self.logger.debug(f'chunks_per_second={chunks_per_second} chunk_interval={chunk_interval}')
self.chunk_delta = datetime.timedelta(seconds=chunk_interval)
self.next_chunk_time = None
self.pending_json_bytes = bytearray()
self.pending_relay_bytes = bytearray()
self.command_lines = []
self.start_time = datetime.datetime.now()
self.report_num = 0
self.logger.info('ArduinoSimulator created')
def __del__(self):
if not self.stop.is_set(): # pragma: no cover
self.logger.critical('ArduinoSimulator.__del__ stop is NOT set')
def run(self):
"""Produce messages periodically and emit their bytes at a limited rate."""
self.logger.info('ArduinoSimulator.run ENTER')
# Produce a message right away, but remove a random number of bytes at the start to reflect
# what happens when we connect at a random time to the Arduino.
now = datetime.datetime.now()
self.next_chunk_time = now
self.next_message_time = now + self.message_delta
b = self.generate_next_message_bytes(now)
cut = random.randrange(len(b))
if cut > 0:
self.logger.info(f'Cutting off the leading {cut} bytes of the first message')
b = b[cut:]
self.pending_json_bytes.extend(b)
# Now two interleaved loops:
# 1) Generate messages every self.message_delta
# 2) Emit a chunk of bytes from pending_json_bytes every self.chunk_delta.
# Clearly we need to emit all the bytes from pending_json_bytes at least
# as fast as we append new messages to it, else we'll have a problem
# (i.e. the simulated baud rate will be too slow for the output rate).
while True:
if self.stop.is_set():
self.logger.info('Returning from ArduinoSimulator.run EXIT')
return
now = datetime.datetime.now()
if now >= self.next_chunk_time:
self.output_next_chunk(now)
if now >= self.next_message_time:
self.generate_next_message(now)
if self.pending_json_bytes and self.next_chunk_time < self.next_message_time:
next_time = self.next_chunk_time
else:
next_time = self.next_message_time
self.read_relay_queue_until(next_time)
def handle_pending_relay_bytes(self):
"""Process complete relay commands."""
newline = b'\n'
while True:
index = self.pending_relay_bytes.find(newline)
if index < 0:
break
line = str(self.pending_relay_bytes[0:index], 'ascii')
self.logger.info(f'Received command: {line}')
del self.pending_relay_bytes[0:index + 1]
self.command_lines.append(line)
if self.pending_relay_bytes:
self.logger.info(f'Accumulated {len(self.pending_relay_bytes)} bytes.')
def read_relay_queue_until(self, next_time):
"""Read and process relay queue bytes until time for the next action."""
while True:
now = datetime.datetime.now()
if now >= next_time:
# Already reached the time for the next main loop event,
# so return to repeat the main loop.
return
remaining = (next_time - now).total_seconds()
assert remaining > 0
self.logger.info(f'ArduinoSimulator.read_relay_queue_until remaining={remaining}')
try:
b = self.relay_queue.get(block=True, timeout=remaining)
assert isinstance(b, (bytes, bytearray))
self.pending_relay_bytes.extend(b)
self.handle_pending_relay_bytes()
# Fake a baud rate for reading by waiting based on the
# number of bytes we just read.
time.sleep(1.0 / 1000 * len(b))
except queue.Empty:
                # Not returning here so that the return above will be
                # hit every time this method executes.
pass
def output_next_chunk(self, now):
"""Output one chunk of pending json bytes."""
self.next_chunk_time = now + self.chunk_delta
if len(self.pending_json_bytes) == 0:
return
last = min(self.chunk_size, len(self.pending_json_bytes))
chunk = bytes(self.pending_json_bytes[0:last])
del self.pending_json_bytes[0:last]
if self.json_queue.full():
self.logger.info('Dropping chunk because the queue is full')
return
self.json_queue.put_nowait(chunk)
self.logger.debug('output_next_chunk -> {}', chunk)
def generate_next_message(self, now):
"""Append the next message to the pending bytearray and scheduled the next message."""
b = self.generate_next_message_bytes(now)
self.pending_json_bytes.extend(b)
self.next_message_time = datetime.datetime.now() + self.message_delta
def generate_next_message_bytes(self, now):
"""Generate the next message (report) from the simulated Arduino."""
# Not worrying here about emulating the 32-bit nature of millis (wraps in 49 days)
elapsed = int((now - self.start_time).total_seconds() * 1000)
self.report_num += 1
self.message['millis'] = elapsed
self.message['report_num'] = self.report_num
if self.command_lines:
self.message['commands'] = self.command_lines
self.command_lines = []
s = to_json(self.message) + '\r\n'
if 'commands' in self.message:
del self.message['commands']
self.logger.debug('generate_next_message -> {!r}', s)
b = s.encode(encoding='ascii')
return b
class FakeArduinoSerialHandler(panoptes.utils.serial.handlers.protocol_no_op.NoOpSerial):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.logger = logger
self.simulator_thread = None
self.relay_queue = queue.Queue(maxsize=1)
self.json_queue = queue.Queue(maxsize=1)
self.json_bytes = bytearray()
self.stop = threading.Event()
self.stop.set()
self.device_simulator = None
def __del__(self):
if self.simulator_thread: # pragma: no cover
self.logger.critical('ArduinoSimulator.__del__ simulator_thread is still present')
self.stop.set()
self.simulator_thread.join(timeout=3.0)
def open(self):
"""Open port.
Raises:
SerialException if the port cannot be opened.
"""
if not self.is_open:
self.is_open = True
self._reconfigure_port()
def close(self):
"""Close port immediately."""
self.is_open = False
self._reconfigure_port()
@property
def in_waiting(self):
"""The number of input bytes available to read immediately."""
if not self.is_open:
raise PortNotOpenError
# Not an accurate count because the elements of self.json_queue are arrays, not individual
# bytes.
return len(self.json_bytes) + self.json_queue.qsize()
def reset_input_buffer(self):
"""Flush input buffer, discarding all it’s contents."""
self.json_bytes.clear()
_drain_queue(self.json_queue)
def read(self, size=1):
"""Read size bytes.
If a timeout is set it may return fewer characters than requested.
With no timeout it will block until the requested number of bytes
is read.
Args:
size: Number of bytes to read.
Returns:
Bytes read from the port, of type 'bytes'.
"""
if not self.is_open:
raise PortNotOpenError
# Not checking if the config is OK, so will try to read from a possibly
# empty queue if using the wrong baudrate, etc. This is deliberate.
response = bytearray()
timeout_obj = serialutil.Timeout(self.timeout)
while True:
b = self._read1(timeout_obj)
if b:
response.extend(b)
if size is not None and len(response) >= size:
break
else: # pragma: no cover
# The timeout expired while in _read1.
break
if timeout_obj.expired(): # pragma: no cover
break
response = bytes(response)
return response
def readline(self):
"""Read and return one line from the simulator.
This override exists just to support logging of the line.
"""
line = super().readline()
self.logger.debug(f'FakeArduinoSerialHandler.readline -> {line!r}')
return line
@property
def out_waiting(self):
"""The number of bytes in the output buffer."""
if not self.is_open:
raise PortNotOpenError
# Not an accurate count because the elements of self.relay_queue are arrays, not individual
# bytes.
return self.relay_queue.qsize()
def reset_output_buffer(self):
"""Clear output buffer.
Aborts the current output, discarding all that is in the output buffer.
"""
if not self.is_open:
raise PortNotOpenError
_drain_queue(self.relay_queue)
def flush(self):
"""Write the buffered data to the output device.
We interpret that here as waiting until the simulator has taken all of the
entries from the queue.
"""
if not self.is_open:
raise PortNotOpenError
while not self.relay_queue.empty():
time.sleep(0.01)
def write(self, data):
"""Write the bytes data to the port.
Args:
data: The data to write (bytes or bytearray instance).
Returns:
Number of bytes written.
Raises:
SerialTimeoutException: In case a write timeout is configured for
the port and the time is exceeded.
"""
if not isinstance(data, (bytes, bytearray)):
raise ValueError('write takes bytes') # pragma: no cover
data = bytes(data) # Make sure it can't change.
self.logger.info('FakeArduinoSerialHandler.write({!r})', data)
try:
for n in range(len(data)):
one_byte = data[n:n + 1]
self.relay_queue.put(one_byte, block=True, timeout=self.write_timeout)
return len(data)
except queue.Full: # pragma: no cover
# This exception is "lossy" in that the caller can't tell how much was written.
            raise serialutil.SerialTimeoutException('Timed out writing to the relay queue')
# --------------------------------------------------------------------------
@property
def is_config_ok(self):
"""Does the caller ask for the correct serial device config?"""
# The default Arduino data, parity and stop bits are: 8 data bits, no parity, one stop bit.
v = (self.baudrate == 9600 and self.bytesize == serialutil.EIGHTBITS and
self.parity == serialutil.PARITY_NONE and not self.rtscts and not self.dsrdtr)
# All existing tests ensure the config is OK, so we never log here.
if not v: # pragma: no cover
self.logger.critical(f'Serial config is not OK: {self.get_settings()!r}')
return v
def _read1(self, timeout_obj):
"""Read 1 byte of input, of type bytes."""
# _read1 is currently called only from read(), which checks that the
# serial device is open, so is_open is always true.
if not self.is_open: # pragma: no cover
raise PortNotOpenError
if not self.json_bytes:
try:
entry = self.json_queue.get(block=True, timeout=timeout_obj.time_left())
assert isinstance(entry, bytes)
self.json_bytes.extend(entry)
except queue.Empty:
return None
# Unless something has gone wrong, json_bytes is always non-empty here.
if not self.json_bytes: # pragma: no cover
return None
c = bytes(self.json_bytes[0:1])
del self.json_bytes[0:1]
return c
# --------------------------------------------------------------------------
# There are a number of methods called by SerialBase that need to be
# implemented by sub-classes, assuming their calls haven't been blocked
# by replacing the calling methods/properties. These are no-op
# implementations.
def _reconfigure_port(self):
"""Reconfigure the open port after a property has been changed.
If you need to know which property has been changed, override the
setter for the appropriate properties.
"""
need_thread = self.is_open and self.is_config_ok
if need_thread and not self.simulator_thread:
_drain_queue(self.relay_queue)
_drain_queue(self.json_queue)
self.json_bytes.clear()
self.stop.clear()
params = self._params_from_url(self.portstr)
self._create_simulator(params)
self.simulator_thread = threading.Thread(
name='Device Simulator', target=lambda: self.device_simulator.run(), daemon=True)
self.simulator_thread.start()
elif self.simulator_thread and not need_thread:
self.stop.set()
self.simulator_thread.join(timeout=30.0)
if self.simulator_thread.is_alive():
# Not a SerialException, but a test infrastructure error.
raise Exception(f'{self.simulator_thread.name} did not stop!') # pragma: no cover
self.simulator_thread = None
self.device_simulator = None
_drain_queue(self.relay_queue)
_drain_queue(self.json_queue)
self.json_bytes.clear()
def _update_rts_state(self):
"""Handle rts being set to some value.
"self.rts = value" has been executed, for some value. This may not
have changed the value.
"""
# We never set rts in our tests, so this doesn't get executed.
pass # pragma: no cover
def _update_dtr_state(self):
"""Handle dtr being set to some value.
"self.dtr = value" has been executed, for some value. This may not
have changed the value.
"""
# We never set dtr in our tests, so this doesn't get executed.
pass # pragma: no cover
def _update_break_state(self):
"""Handle break_condition being set to some value.
"self.break_condition = value" has been executed, for some value.
This may not have changed the value.
Note that break_condition is set and then cleared by send_break().
"""
# We never set break_condition in our tests, so this doesn't get executed.
pass # pragma: no cover
# --------------------------------------------------------------------------
# Internal (non-standard) methods.
def _params_from_url(self, url):
"""Extract various params from the URL."""
expected = 'expected a string in the form "arduinosimulator://[?board=<name>]"'
parts = urllib.parse.urlparse(url)
# Unless we force things (break the normal protocol), scheme will always
# be 'arduinosimulator'.
if parts.scheme != 'arduinosimulator':
raise Exception(f'{expected}: got scheme {parts.scheme!r}') # pragma: no cover
int_param_names = {'chunk_size', 'read_buffer_size', 'write_buffer_size'}
params = {}
for option, values in urllib.parse.parse_qs(parts.query, True).items():
if option == 'board' and len(values) == 1:
params[option] = values[0]
elif option == 'name' and len(values) == 1:
# This makes it easier for tests to confirm the right serial device has
# been opened.
self.name = values[0]
elif option in int_param_names and len(values) == 1:
params[option] = int(values[0])
else:
raise Exception(f'{expected}: unknown param {option!r}') # pragma: no cover
return params
def _create_simulator(self, params):
board = params.get('board', 'telemetry')
if board == 'telemetry':
message = from_json("""
{
"name":"telemetry_board",
"ver":"2017-09-23",
"power": {
"computer":1,
"fan":1,
"mount":1,
"cameras":1,
"weather":1,
"main":1
},
"current": {"main":387,"fan":28,"mount":34,"cameras":27},
"amps": {"main":1083.60,"fan":50.40,"mount":61.20,"cameras":27.00},
"humidity":42.60,
"temperature":[13.01,12.81,19.75],
"temp_00":15.50
}
""")
elif board == 'camera':
message = from_json("""
{
"name":"camera_board",
"inputs":6,
"camera_00":1,
"camera_01":1,
"accelerometer": {"x":-7.02, "y":6.95, "z":1.70, "o": 6},
"humidity":59.60,
"temperature":[13.01,12.81,19.75],
"temp_00":12.50
}
""")
elif board == 'json_object':
# Produce an output that is json, but not what we expect
message = {}
else:
raise Exception(f'Unknown board: {board}') # pragma: no cover
# The elements of these queues are of type bytes. This means we aren't fully controlling
# the baudrate unless the chunk_size is 1, but that should be OK.
chunk_size = params.get('chunk_size', 20)
self.json_queue = queue.Queue(maxsize=params.get('read_buffer_size', 10000))
self.relay_queue = queue.Queue(maxsize=params.get('write_buffer_size', 100))
self.device_simulator = ArduinoSimulator(message, self.relay_queue, self.json_queue,
chunk_size, self.stop)
Serial = FakeArduinoSerialHandler
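# Minimal usage sketch (illustrative, not part of the original module). pyserial
# locates this handler through its URL mechanism, so the example assumes the
# 'panoptes.utils.serial.handlers' package is registered as a protocol handler
# package; the query parameters mirror those parsed in _params_from_url above.
if __name__ == '__main__':  # pragma: no cover
    import serial
    serial.protocol_handler_packages.append('panoptes.utils.serial.handlers')
    ser = serial.serial_for_url(
        'arduinosimulator://?board=telemetry&name=telemetry_simulator',
        baudrate=9600, timeout=5.0)
    # The first line may be truncated (the simulator cuts a random prefix).
    print(ser.readline())  # One chunked JSON report from the simulated board.
    ser.close()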
|
thread_monitor.py
|
import sys
try:
import queue
except ImportError:
import Queue as queue
class ThreadMonitor(object):
"""Helper class for catching exceptions generated in threads.
http://blog.eugeneoden.com/2008/05/12/testing-threads-with-pytest/
Usage:
mon = ThreadMonitor()
th = threading.Thread(target=mon.wrap(myFunction))
th.start()
th.join()
mon.check() # raises any exception generated in the thread
Any raised exception will include a traceback from the original
thread, not the function calling mon.check()
Works for multiple threads
"""
def __init__(self):
self.queue = queue.Queue()
def wrap(self, function):
def threadMonitorWrapper(*args, **kw):
try:
ret = function(*args, **kw)
except Exception:
self.queue.put(sys.exc_info())
raise
return ret
return threadMonitorWrapper
def check(self):
try:
item = self.queue.get(block=False)
except queue.Empty:
return
klass, value, tb = item
exc = klass(value)
if hasattr(exc, "with_traceback"):
raise exc.with_traceback(tb)
else:
raise exc
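# --------------------------------------------------------------------------
# Minimal self-test sketch (not part of the original module): run this file
# directly to see ThreadMonitor re-raise a worker thread's exception in the
# main thread. The worker thread also prints its own traceback to stderr,
# because the wrapper re-raises after recording the exception.
if __name__ == '__main__':
    import threading

    def worker():
        raise RuntimeError('boom in worker thread')

    mon = ThreadMonitor()
    th = threading.Thread(target=mon.wrap(worker))
    th.start()
    th.join()
    try:
        mon.check()
    except RuntimeError as exc:
        print('caught from worker thread: {0}'.format(exc))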
|
config_asav.py
|
#!/usr/bin/env python3
# scripts/config_asav.py
#
# Import/Export script for vIOS.
#
# @author Andrea Dainese <andrea.dainese@gmail.com>
# @copyright 2014-2016 Andrea Dainese
# @license BSD-3-Clause https://github.com/dainok/unetlab/blob/master/LICENSE
# @link http://www.unetlab.com/
# @version 20160719
import getopt, multiprocessing, os, pexpect, re, sys, time
username = 'admin'
password = 'cisco'
secret = ''
conntimeout = 3 # Maximum time for console connection
expctimeout = 3 # Maximum time for each short expect
longtimeout = 30 # Maximum time for each long expect
timeout = 60000 # Maximum run time in milliseconds (conntimeout is included)
def node_login(handler):
# Send an empty line, and wait for the login prompt
i = -1
while i == -1:
try:
handler.sendline('a\r\n')
i = handler.expect([
'Username:',
'\(config',
'>',
'#'], timeout = 5)
except:
i = -1
if i == 0:
# Need to send username and password
handler.sendline(username)
try:
handler.expect('Password:', timeout = expctimeout)
except:
print('ERROR: error waiting for "Password:" prompt.')
node_quit(handler)
return False
handler.sendline(password)
try:
j = handler.expect(['>', '#'], timeout = expctimeout)
except:
print('ERROR: error waiting for [">", "#"] prompt.')
node_quit(handler)
return False
if j == 0:
# Secret password required
handler.sendline(secret)
try:
handler.expect('#', timeout = expctimeout)
except:
print('ERROR: error waiting for "#" prompt.')
node_quit(handler)
return False
return True
elif j == 1:
# Nothing to do
return True
else:
# Unexpected output
node_quit(handler)
return False
elif i == 1:
# Config mode detected, need to exit
handler.sendline('end')
try:
handler.expect('#', timeout = expctimeout)
except:
print('ERROR: error waiting for "#" prompt.')
node_quit(handler)
return False
return True
elif i == 2:
# Need higher privilege
handler.sendline('enable')
try:
j = handler.expect(['Password:', '#'])
except:
print('ERROR: error waiting for ["Password:", "#"] prompt.')
node_quit(handler)
return False
if j == 0:
            # Need to provide secret
handler.sendline(secret)
try:
handler.expect('#', timeout = expctimeout)
except:
print('ERROR: error waiting for "#" prompt.')
node_quit(handler)
return False
return True
elif j == 1:
# Nothing to do
return True
else:
# Unexpected output
node_quit(handler)
return False
elif i == 3:
# Nothing to do
return True
else:
# Unexpected output
node_quit(handler)
return False
def node_firstlogin(handler):
# Send an empty line, and wait for the login prompt
i = -1
while i == -1:
try:
handler.sendline('\r\n')
i = handler.expect('ciscoasa>', timeout = 5)
except:
i = -1
if i == 0:
# Need higher privilege
handler.sendline('enable')
try:
j = handler.expect(['Password:', '#'])
except:
print('ERROR: error waiting for ["Password:", "#"] prompt.')
node_quit(handler)
return False
if j == 0:
            # Need to provide secret
handler.sendline('')
try:
handler.expect('#', timeout = expctimeout)
except:
print('ERROR: error waiting for "#" prompt.')
node_quit(handler)
return False
return True
elif j == 1:
# Nothing to do
return True
else:
# Unexpected output
node_quit(handler)
return False
else:
# Unexpected output
node_quit(handler)
return False
def node_quit(handler):
if handler.isalive() == True:
handler.sendline('quit\n')
handler.close()
def config_get(handler):
# Clearing all "expect" buffer
while True:
try:
handler.expect('#', timeout = 0.1)
except:
break
# Disable paging
handler.sendline('terminal pager 0')
try:
handler.expect('#', timeout = expctimeout)
except:
print('ERROR: error waiting for "#" prompt.')
node_quit(handler)
return False
# Getting the config
handler.sendline('more system:running-config')
try:
        handler.expect('#', timeout = longtimeout)
except:
print('ERROR: error waiting for "#" prompt.')
node_quit(handler)
return False
config = handler.before.decode()
# Manipulating the config
config = re.sub('\r', '', config, flags=re.DOTALL) # Unix style
config = re.sub('.*: Saved\n', '', config, flags=re.DOTALL) # Header
config = re.sub(': end.*', ': end\n', config, flags=re.DOTALL) # Footer
return config
def config_put(handler):
while True:
try:
i = handler.expect('>', timeout)
except:
return False
return True
def usage():
    print('Usage: %s <standard options>' %(sys.argv[0]))
    print('Standard Options:')
    print('-a <s> *Action can be:')
    print(' - get: get the startup-configuration and push it to a file')
    print(' - put: put the file as startup-configuration')
    print('-f <s> *File')
    print('-p <n> *Console port')
    print('-t <n> Timeout in seconds (default = %i)' %(timeout // 1000))
    print('* Mandatory option')
def now():
# Return current UNIX time in milliseconds
return int(round(time.time() * 1000))
def main(action, filename, port):
try:
# Connect to the device
tmp = conntimeout
while (tmp > 0):
handler = pexpect.spawn('telnet 127.0.0.1 %i' %(port))
time.sleep(0.1)
tmp = tmp - 0.1
if handler.isalive() == True:
break
if (handler.isalive() != True):
print('ERROR: cannot connect to port "%i".' %(port))
node_quit(handler)
sys.exit(1)
if action == 'get':
# Login to the device and get a privileged prompt
rc = node_login(handler)
if rc != True:
print('ERROR: failed to login.')
node_quit(handler)
sys.exit(1)
config = config_get(handler)
if config in [False, None]:
print('ERROR: failed to retrieve config.')
node_quit(handler)
sys.exit(1)
try:
fd = open(filename, 'a')
fd.write(config)
fd.close()
except:
print('ERROR: cannot write config to file.')
node_quit(handler)
sys.exit(1)
elif action == 'put':
rc = config_put(handler)
if rc != True:
print('ERROR: failed to push config.')
node_quit(handler)
sys.exit(1)
# Remove lock file
lock = '%s/.lock' %(os.path.dirname(filename))
if os.path.exists(lock):
os.remove(lock)
# Mark as configured
configured = '%s/.configured' %(os.path.dirname(filename))
if not os.path.exists(configured):
open(configured, 'a').close()
node_quit(handler)
sys.exit(0)
except Exception as e:
print('ERROR: got an exception')
print(type(e)) # the exception instance
print(e.args) # arguments stored in .args
        print(e) # __str__ allows args to be printed directly, but may be overridden in exception subclasses
node_quit(handler)
return False
if __name__ == "__main__":
action = None
filename = None
port = None
# Getting parameters from command line
try:
opts, args = getopt.getopt(sys.argv[1:], 'a:p:t:f:', ['action=', 'port=', 'timeout=', 'file='])
except getopt.GetoptError as e:
usage()
sys.exit(3)
for o, a in opts:
if o in ('-a', '--action'):
action = a
elif o in ('-f', '--file'):
filename = a
elif o in ('-p', '--port'):
try:
port = int(a)
except:
port = -1
elif o in ('-t', '--timeout'):
try:
timeout = int(a) * 1000
except:
timeout = -1
else:
print('ERROR: invalid parameter.')
# Checking mandatory parameters
if action == None or port == None or filename == None:
usage()
print('ERROR: missing mandatory parameters.')
sys.exit(1)
if action not in ['get', 'put']:
usage()
print('ERROR: invalid action.')
sys.exit(1)
if timeout < 0:
usage()
print('ERROR: timeout must be 0 or higher.')
sys.exit(1)
if port < 0:
usage()
print('ERROR: port must be 32768 or higher.')
sys.exit(1)
if action == 'get' and os.path.exists(filename):
usage()
print('ERROR: destination file already exists.')
sys.exit(1)
if action == 'put' and not os.path.exists(filename):
usage()
        print('ERROR: source file does not exist.')
sys.exit(1)
if action == 'put':
try:
fd = open(filename, 'r')
config = fd.read()
fd.close()
except:
usage()
print('ERROR: cannot read from file.')
sys.exit(1)
# Backgrounding the script
end_before = now() + timeout
p = multiprocessing.Process(target=main, name="Main", args=(action, filename, port))
p.start()
while (p.is_alive() and now() < end_before):
# Waiting for the child process to end
time.sleep(1)
if p.is_alive():
# Timeout occurred
print('ERROR: timeout occurred.')
p.terminate()
sys.exit(127)
if p.exitcode != 0:
sys.exit(127)
sys.exit(0)
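# --------------------------------------------------------------------------
# Example invocations (not part of the original script). The console port is
# illustrative; use the telnet console port of your own node:
#
#   Get the startup-configuration and write it to a file:
#       python3 config_asav.py -a get -p 32769 -f /tmp/asav.cfg -t 120
#
#   Put the file as startup-configuration:
#       python3 config_asav.py -a put -p 32769 -f /tmp/asav.cfg -t 120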
|
mqtt_client.py
|
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
MQTT client utility: Tries to hide Paho client details to ease MQTT usage.
Reconnects to the MQTT server automatically.
This module depends on the paho-mqtt package (ex-mosquitto), provided by the
Eclipse Foundation: see http://www.eclipse.org/paho
:author: Thomas Calmant
:copyright: Copyright 2015, Thomas Calmant
:license: Apache License 2.0
:version: 0.6.4
..
Copyright 2015 Thomas Calmant
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Standard library
import logging
import os
import sys
import threading
# MQTT client
import paho.mqtt.client as paho
# ------------------------------------------------------------------------------
# Module version
__version_info__ = (0, 6, 4)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
_logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
class MqttClient(object):
"""
    Wrapper around the Paho MQTT client: hides Paho details and reconnects automatically
"""
def __init__(self, client_id=None):
"""
Sets up members
:param client_id: ID of the MQTT client
"""
# No ID
if not client_id:
# Randomize client ID
self._client_id = self.generate_id()
elif len(client_id) > 23:
# ID too large
_logger.warning("MQTT Client ID '%s' is too long (23 chars max): "
"generating a random one", client_id)
# Keep the client ID as it might be accepted
self._client_id = client_id
else:
# Keep the ID as is
self._client_id = client_id
# Reconnection timer
self.__timer = threading.Timer(5, self.__reconnect)
# Publication events
self.__in_flight = {}
# MQTT client
self.__mqtt = paho.Client(self._client_id)
# Give access to Paho methods to configure TLS
self.tls_set = self.__mqtt.tls_set
# Paho callbacks
self.__mqtt.on_connect = self.__on_connect
self.__mqtt.on_disconnect = self.__on_disconnect
self.__mqtt.on_message = self.__on_message
self.__mqtt.on_publish = self.__on_publish
@property
def raw_client(self):
"""
Returns the raw client object, depending on the underlying library
"""
return self.__mqtt
@staticmethod
def on_connect(client, result_code):
"""
User callback: called when the client is connected
:param client: The Pelix MQTT client which connected
:param result_code: The MQTT result code
"""
pass
@staticmethod
def on_disconnect(client, result_code):
"""
User callback: called when the client is disconnected
:param client: The Pelix MQTT client which disconnected
:param result_code: The MQTT result code
"""
pass
@staticmethod
def on_message(client, message):
"""
User callback: called when the client has received a message
:param client: The Pelix MQTT client which received a message
:param message: The MQTT message
"""
pass
@classmethod
def generate_id(cls, prefix="pelix-"):
"""
Generates a random MQTT client ID
:param prefix: Client ID prefix (truncated to 8 chars)
:return: A client ID of 22 or 23 characters
"""
if not prefix:
# Normalize string
prefix = ""
else:
# Truncate long prefixes
prefix = prefix[:8]
# Prepare the missing part
nb_bytes = (23 - len(prefix)) // 2
random_bytes = os.urandom(nb_bytes)
if sys.version_info[0] >= 3:
random_ints = [char for char in random_bytes]
else:
random_ints = [ord(char) for char in random_bytes]
random_id = ''.join('{0:02x}'.format(value) for value in random_ints)
return "{0}{1}".format(prefix, random_id)
@classmethod
def topic_matches(cls, subscription_filter, topic):
"""
Checks if the given topic matches the given subscription filter
:param subscription_filter: A MQTT subscription filter
:param topic: A topic
:return: True if the topic matches the filter
"""
return paho.topic_matches_sub(subscription_filter, topic)
@property
def client_id(self):
"""
The MQTT client ID
"""
return self._client_id
def set_credentials(self, username, password):
"""
Sets the user name and password to be authenticated on the server
:param username: Client username
:param password: Client password
"""
self.__mqtt.username_pw_set(username, password)
def set_will(self, topic, payload, qos=0, retain=False):
"""
Sets up the will message
:param topic: Topic of the will message
:param payload: Content of the message
:param qos: Quality of Service
:param retain: The message will be retained
:raise ValueError: Invalid topic
:raise TypeError: Invalid payload
"""
self.__mqtt.will_set(topic, payload, qos, retain=retain)
def connect(self, host="localhost", port=1883, keepalive=60):
"""
Connects to the MQTT server. The client will automatically try to
reconnect to this server when the connection is lost.
:param host: MQTT server host
:param port: MQTT server port
:param keepalive: Maximum period in seconds between communications with
the broker
:raise ValueError: Invalid host or port
"""
# Disconnect first (it also stops the timer)
self.disconnect()
# Prepare the connection
self.__mqtt.connect(host, port, keepalive)
# Start the MQTT loop
self.__mqtt.loop_start()
def disconnect(self):
"""
Disconnects from the MQTT server
"""
# Stop the timer
self.__stop_timer()
# Unlock all publishers
for event in self.__in_flight.values():
event.set()
# Disconnect from the server
self.__mqtt.disconnect()
# Stop the MQTT loop thread
# Use a thread to avoid a dead lock in Paho
thread = threading.Thread(target=self.__mqtt.loop_stop)
thread.daemon = True
thread.start()
# Give it some time
thread.join(4)
def publish(self, topic, payload, qos=0, retain=False, wait=False):
"""
Sends a message through the MQTT connection
:param topic: Message topic
:param payload: Message content
:param qos: Quality of Service
:param retain: Retain flag
:param wait: If True, prepares an event to wait for the message to be
published
:return: The local message ID, None on error
"""
result = self.__mqtt.publish(topic, payload, qos, retain)
if wait and not result[0]:
# Publish packet sent, wait for it to return
self.__in_flight[result[1]] = threading.Event()
_logger.debug("Waiting for publication of %s", topic)
return result[1]
def wait_publication(self, mid, timeout=None):
"""
Wait for a publication to be validated
:param mid: Local message ID (result of publish)
:param timeout: Wait timeout (in seconds)
:return: True if the message was published, False if timeout was raised
:raise KeyError: Unknown waiting local message ID
"""
return self.__in_flight[mid].wait(timeout)
def subscribe(self, topic, qos=0):
"""
Subscribes to a topic on the server
:param topic: Topic filter string(s)
:param qos: Desired quality of service
:raise ValueError: Invalid topic or QoS
"""
self.__mqtt.subscribe(topic, qos)
def unsubscribe(self, topic):
"""
        Unsubscribes from a topic on the server
:param topic: Topic(s) to unsubscribe from
:raise ValueError: Invalid topic parameter
"""
self.__mqtt.unsubscribe(topic)
def __start_timer(self, delay):
"""
Starts the reconnection timer
:param delay: Delay (in seconds) before calling the reconnection method
"""
self.__timer = threading.Timer(delay, self.__reconnect)
self.__timer.daemon = True
self.__timer.start()
def __stop_timer(self):
"""
Stops the reconnection timer, if any
"""
if self.__timer is not None:
self.__timer.cancel()
self.__timer = None
def __reconnect(self):
"""
Tries to connect to the MQTT server
"""
# Cancel the timer, if any
self.__stop_timer()
try:
# Try to reconnect the server
result_code = self.__mqtt.reconnect()
if result_code:
# Something wrong happened
message = "Error connecting the MQTT server: {0} ({1})" \
.format(result_code, paho.error_string(result_code))
_logger.error(message)
raise ValueError(message)
except Exception as ex:
# Something went wrong: log it
_logger.error("Exception connecting server: %s", ex)
finally:
# Prepare a reconnection timer. It will be cancelled by the
# on_connect callback
self.__start_timer(10)
def __on_connect(self, client, userdata, flags, result_code):
"""
Client connected to the server
:param client: Connected Paho client
:param userdata: User data (unused)
:param flags: Response flags sent by the broker
:param result_code: Connection result code (0: success, others: error)
"""
if result_code:
# result_code != 0: something wrong happened
_logger.error("Error connecting the MQTT server: %s (%d)",
paho.connack_string(result_code), result_code)
else:
# Connection is OK: stop the reconnection timer
self.__stop_timer()
# Notify the caller, if any
if self.on_connect is not None:
try:
self.on_connect(self, result_code)
except Exception as ex:
_logger.exception("Error notifying MQTT listener: %s", ex)
def __on_disconnect(self, client, userdata, result_code):
"""
Client has been disconnected from the server
:param client: Client that received the message
:param userdata: User data (unused)
:param result_code: Disconnection reason (0: expected, 1: error)
"""
if result_code:
# rc != 0: unexpected disconnection
_logger.error(
"Unexpected disconnection from the MQTT server: %s (%d)",
paho.connack_string(result_code), result_code)
# Try to reconnect
self.__stop_timer()
self.__start_timer(2)
# Notify the caller, if any
if self.on_disconnect is not None:
try:
self.on_disconnect(self, result_code)
except Exception as ex:
_logger.exception("Error notifying MQTT listener: %s", ex)
def __on_message(self, client, userdata, msg):
"""
A message has been received from a server
:param client: Client that received the message
:param userdata: User data (unused)
:param msg: A MQTTMessage bean
"""
# Notify the caller, if any
if self.on_message is not None:
try:
self.on_message(self, msg)
except Exception as ex:
_logger.exception("Error notifying MQTT listener: %s", ex)
def __on_publish(self, client, userdata, mid):
"""
A message has been published by a server
:param client: Client that received the message
:param userdata: User data (unused)
:param mid: Message ID
"""
try:
self.__in_flight[mid].set()
except KeyError:
pass
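# ------------------------------------------------------------------------------
# Usage sketch (not part of the original module). The broker address and topic
# are assumptions; point them at a reachable MQTT broker.
if __name__ == '__main__':
    import time

    def on_message(mqtt_client, message):
        # ``message`` is a paho MQTTMessage bean
        print("received on {0}: {1!r}".format(message.topic, message.payload))

    client = MqttClient()
    client.on_message = on_message
    client.connect('localhost', 1883)        # assumed local broker
    time.sleep(1)                            # give the network loop time to connect
    client.subscribe('pelix/demo', qos=1)
    mid = client.publish('pelix/demo', b'hello from MqttClient', qos=1, wait=True)
    client.wait_publication(mid, timeout=5)  # block until the broker acknowledges it
    time.sleep(1)                            # let on_message fire for our own message
    client.disconnect()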
|
colorlabels.py
|
import getpass
import itertools
import os
import platform
import sys
import threading
import time
# Deal with Python 2 & 3 compatibility problem.
PY2 = sys.version_info[0] < 3
_input = raw_input if PY2 else input
_main_thread = threading.current_thread()
# TTY detection and configuration.
COLORLABELS_TTY = os.getenv('COLORLABELS_TTY')
if COLORLABELS_TTY is None:
is_tty = sys.stdout.isatty() # auto detect
elif COLORLABELS_TTY.lower() in {'1', 'yes', 'y', 'true', 'on'}:
is_tty = True # force tty mode
elif COLORLABELS_TTY.lower() in {'0', 'no', 'n', 'false', 'off'}:
is_tty = False # force non-tty mode (no color or progress animations)
else:
raise ValueError('invalid value {!r} for COLORLABELS_TTY'.format(COLORLABELS_TTY))
def color_code(color_number):
"""Generate an ANSI escape sequence with the given color number or description string."""
return '\033[' + str(color_number) + 'm'
# Standard colors.
BLACK = color_code(30)
RED = color_code(31)
GREEN = color_code(32)
YELLOW = color_code(33)
BLUE = color_code(34)
MAGENTA = color_code(35)
CYAN = color_code(36)
WHITE = color_code(37)
BRIGHT_BLACK = color_code(90)
BRIGHT_RED = color_code(91)
BRIGHT_GREEN = color_code(92)
BRIGHT_YELLOW = color_code(93)
BRIGHT_BLUE = color_code(94)
BRIGHT_MAGENTA = color_code(95)
BRIGHT_CYAN = color_code(96)
BRIGHT_WHITE = color_code(97)
COLOR_RESET = color_code(0) # Reset color settings in console.
COLOR_NONE = '' # Does not change color.
CLEAR_LINE = '\r\033[K' # Erase all characters on the line.
# All label types.
all_labels = ('section', 'item', 'success', 'warning', 'error', 'info',
'progress', 'plain', 'question', 'input', 'password')
# Default colors for each kind of label.
default_colors = {
'section': BRIGHT_MAGENTA,
'item': COLOR_NONE,
'success': BRIGHT_GREEN,
'warning': BRIGHT_YELLOW,
'error': BRIGHT_RED,
'info': BRIGHT_CYAN,
'progress': BRIGHT_CYAN,
'plain': COLOR_NONE,
'question': BRIGHT_CYAN,
'input': BRIGHT_CYAN,
'password': BRIGHT_CYAN,
}
# Custom settings of colors for each kind of label.
custom_colors = {}
for _label_type in all_labels:
custom_colors[_label_type] = None
# Default and custom color span settings.
# 0 -> no color
# 1 -> color the mark
# 2 -> color the header
# 3 -> color the whole line
default_color_span = 3
custom_color_span = None
# Default marks for each kind of label.
default_marks = {
'section': '#',
'item': '*',
'success': '+',
'warning': '!',
'error': '-',
'info': 'i',
'progress': '=',
'plain': '*',
'question': '?',
'input': '>',
'password': '>',
}
# Custom settings of marks for each kind of label.
custom_marks = {}
for _label_type in all_labels:
custom_marks[_label_type] = None
# Header pattern.
header_pattern = '[{mark}]'
# Default and custom header settings.
default_show_header = True
custom_show_header = None
# Modes of the progress label.
PROGRESS_STATIC = 0
PROGRESS_SPIN = 1
PROGRESS_EXPAND = 2
PROGRESS_MOVE = 3
PROGRESS_DETERMINATE = 4
# Default settings of different progress modes.
default_progress_config = {
PROGRESS_SPIN: {
'position': 'mark',
'interval': 0.1,
'erase': False
},
PROGRESS_EXPAND: {
'char': '.',
'width': 3,
'interval': 1,
'erase': False
},
PROGRESS_MOVE: {
'char': '.',
'num': 3,
'width': 12,
'style': 'loop',
'interval': 0.1,
'erase': False
},
PROGRESS_DETERMINATE: {
'char_done': '=',
'char_head': '>',
'char_undone': ' ',
'width': 40,
'cleanup': False,
'erase': False
}
}
# Internal functions.
# Check whether the color is valid.
def _check_color(color):
if not isinstance(color, str):
raise TypeError("'color' should be a string")
# Check whether color span is valid.
def _check_color_span(color_span):
if not isinstance(color_span, int):
raise TypeError("'color_span' should be an integer")
if color_span not in {0, 1, 2, 3}:
raise ValueError("'color_span' should be one of 0, 1, 2 or 3")
# Check whether the mark is valid.
def _check_mark(mark):
if not isinstance(mark, str):
raise TypeError("'mark' should be a string")
# Check whether progress mode is valid.
def _check_progress_mode(mode):
if mode not in {PROGRESS_STATIC, PROGRESS_SPIN, PROGRESS_EXPAND, PROGRESS_MOVE, PROGRESS_DETERMINATE}:
raise ValueError('invalid progress mode')
# Check whether a value is one of the acceptable values.
def _check_value_in_list(value, field, valuelist):
if len(valuelist) < 2:
raise ValueError('should give at least 2 choices')
if value not in valuelist:
raise ValueError('{!r} should be {} or {!r}'.format(field, ', '.join(map(repr, valuelist[:-1])), valuelist[-1]))
# Check whether a value is a positive number.
def _check_positive_number(value, field):
if not isinstance(value, (int, float)):
raise TypeError('{!r} should be a number'.format(field))
if value <= 0:
raise ValueError('{!r} should be a positive number'.format(field))
# Check whether a value is a character.
def _check_character(value, field):
if not isinstance(value, str):
raise TypeError('{!r} should be a string'.format(field))
if len(value) != 1:
raise ValueError('{!r} should be one character'.format(field))
# Check whether a value is an integer not less than a given value.
def _check_interger_minimum(value, minimum, field):
if not isinstance(value, int):
raise TypeError('{!r} should be an integer'.format(field))
if value < minimum:
raise ValueError('{!r} should be at least {}'.format(field, minimum))
# Check whether a value is a valid percentage.
def _check_percent(value, field):
if not isinstance(value, (int, float)):
raise TypeError('{!r} should be a number'.format(field))
if value < 0 or value > 1:
raise ValueError('{!r} should be in range [0, 1]'.format(field))
# If parameter is present, check whether it is a string, and set config dict with the given key.
def _check_str_and_config_if_present(key, kwargs, target, target_key):
if key in kwargs:
value = kwargs[key]
if not isinstance(value, str):
raise TypeError('{!r} should be a string'.format(key))
target[target_key] = value
# Choose the value which will take effect from a list of layered settings.
def _layered_choice(*args):
if not args:
raise TypeError('should give at least one choice')
# Choose the first value which is not None.
for arg in args:
if arg is not None:
return arg
return None
# Print a string to stdout without appending '\n', and flush stdout.
def _inline_write(s):
sys.stdout.write(s)
sys.stdout.flush()
# Display a generic message label.
def _print_label(color, mark, msg, newline=True, reset_color=True, clear_line=True, **kwargs):
color_span = _layered_choice(kwargs.get('color_span'), custom_color_span, default_color_span)
show_header = _layered_choice(kwargs.get('show_header'), custom_show_header, default_show_header)
msg = str(msg)
_check_color(color)
_check_color_span(color_span)
_check_mark(mark)
if not is_tty: # disable color output for non-tty mode
color_span = 0
if show_header:
if color_span == 0: # No color.
out_string = header_pattern.format(mark=mark) + ' ' + msg
elif color_span == 1: # Color the mark.
out_string = header_pattern.format(mark=color + mark + COLOR_RESET) + ' ' + msg
elif color_span == 2: # Color the header.
out_string = color + header_pattern.format(mark=mark) + COLOR_RESET + ' ' + msg
else: # Color the whole line.
out_string = color + header_pattern.format(mark=mark) + ' ' + msg \
+ (COLOR_RESET if reset_color else COLOR_NONE)
else:
if color_span <= 2:
out_string = msg
else:
out_string = color + msg + (COLOR_RESET if reset_color else COLOR_NONE)
if clear_line and is_tty:
out_string = CLEAR_LINE + out_string
if newline:
out_string += '\n'
_inline_write(out_string)
# Display a generic input label.
def _input_label(color, mark, msg, **kwargs):
_print_label(color, mark, msg, newline=False, reset_color=False, **kwargs)
try:
input_data = _input()
finally:
if is_tty:
_inline_write(COLOR_RESET) # Ensure color reset.
return input_data
# Perform the final print of a progress label.
def _progress_final(color, mark, msg, **kwargs):
if kwargs['erase']:
_inline_write(CLEAR_LINE)
else:
_print_label(color, mark, msg, **kwargs)
# Thread for progress animations in indeterminate modes.
# We should take care of clearing excessive characters.
def _progress_print_thread(label, **kwargs):
if label.mode == PROGRESS_SPIN:
spin_gen = itertools.cycle('-\\|/')
elif label.mode == PROGRESS_EXPAND:
dots_gen = itertools.cycle(range(1, kwargs['width'] + 1))
elif label.mode == PROGRESS_MOVE:
direction = True
buf = kwargs['char'] * kwargs['num'] + ' ' * (kwargs['width'] - kwargs['num'])
msg = str(label.msg)
while not label.stopped:
if not _main_thread.is_alive():
return
if label.mode == PROGRESS_SPIN:
if kwargs['position'] == 'mark':
_print_label(label.color, next(spin_gen), msg, newline=False, **kwargs)
else:
_print_label(label.color, label.mark, msg + next(spin_gen), newline=False, **kwargs)
elif label.mode == PROGRESS_EXPAND:
_print_label(label.color, label.mark, msg + kwargs['char'] * next(dots_gen), newline=False, **kwargs)
elif label.mode == PROGRESS_MOVE:
_print_label(label.color, label.mark, msg + '[' + buf + ']', newline=False, **kwargs)
if direction:
buf = buf[-1] + buf[:-1]
else:
buf = buf[1:] + buf[0]
if kwargs['style'] == 'reflect' and kwargs['char'] in {buf[0], buf[-1]}:
direction = not direction
time.sleep(kwargs['interval'])
_progress_final(label.color, label.mark, msg, **kwargs)
class ProgressLabel:
def __init__(self, mode, color, mark, msg, **kwargs):
config = default_progress_config[mode].copy()
config.update(kwargs)
if mode == PROGRESS_SPIN:
_check_value_in_list(config['position'], 'position', ('mark', 'tail'))
_check_positive_number(config['interval'], 'interval')
elif mode == PROGRESS_EXPAND:
_check_character(config['char'], 'char')
_check_interger_minimum(config['width'], 2, 'width')
_check_positive_number(config['interval'], 'interval')
elif mode == PROGRESS_MOVE:
_check_character(config['char'], 'char')
if config['char'] == ' ':
raise ValueError("'char' cannot be space")
_check_interger_minimum(config['num'], 1, 'num')
_check_interger_minimum(config['width'], 2, 'width')
if config['num'] >= config['width']:
raise ValueError("'num' should be less than 'width'")
_check_value_in_list(config['style'], 'style', ('loop', 'reflect'))
_check_positive_number(config['interval'], 'interval')
elif mode == PROGRESS_DETERMINATE:
_check_character(config['char_done'], 'char_done')
_check_character(config['char_head'], 'char_head')
_check_character(config['char_undone'], 'char_undone')
_check_interger_minimum(config['width'], 0, 'width')
self.mode = mode
self.color = color
self.mark = mark
self.msg = msg
if not is_tty:
# Fall back to a static label if not in a tty.
_print_label(color, mark, msg, **config)
return
if mode in {PROGRESS_SPIN, PROGRESS_EXPAND, PROGRESS_MOVE}:
self.print_thread = threading.Thread(target=_progress_print_thread, args=(self,), kwargs=config)
self.stopped = False
self.print_thread.start()
elif mode == PROGRESS_DETERMINATE:
self.config = config
self.update(0)
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
self.stop()
def update(self, percent, text=''):
"""Update progress to the given percentage in determinate mode.
You can provide additional text to describe current status."""
if self.mode != PROGRESS_DETERMINATE:
raise TypeError('cannot update progress in indeterminate mode')
_check_percent(percent, 'percent')
if not isinstance(text, str):
raise TypeError("'text' should be a string")
if not is_tty:
return
num_total = self.config['width']
if num_total:
num_done = int(round(num_total * percent))
if num_done < num_total:
bar = self.config['char_done'] * num_done + self.config['char_head'] + \
self.config['char_undone'] * (num_total - num_done - 1)
else:
bar = self.config['char_done'] * num_total
bar = '[' + bar + ']'
else:
bar = ''
msg = str(self.msg)
_print_label(self.color, self.mark, msg + bar + text, newline=False, **self.config)
def stop(self):
"""Stop progress animation."""
if not is_tty:
return
if self.mode in {PROGRESS_SPIN, PROGRESS_EXPAND, PROGRESS_MOVE}:
if not self.stopped:
self.stopped = True
self.print_thread.join()
elif self.mode == PROGRESS_DETERMINATE:
if not self.config['erase'] and not self.config['cleanup']:
_inline_write('\n')
else:
_progress_final(self.color, self.mark, self.msg, **self.config)
# Public functions that users are supposed to call.
def config(**kwargs):
"""Set up runtime global settings."""
# Color span configuration.
if 'color_span' in kwargs:
color_span = kwargs['color_span']
_check_color_span(color_span)
global custom_color_span
custom_color_span = color_span
# Header configuration.
if 'show_header' in kwargs:
global custom_show_header
custom_show_header = bool(kwargs['show_header'])
# Label colors configuration.
for label in all_labels:
_check_str_and_config_if_present(label + '_color', kwargs, custom_colors, label)
# Label marks configuration.
for label in all_labels:
_check_str_and_config_if_present(label + '_mark', kwargs, custom_marks, label)
def _get_color_and_mark(label_type, kwargs):
color = _layered_choice(kwargs.pop('color', None), custom_colors[label_type], default_colors[label_type])
mark = _layered_choice(kwargs.pop('mark', None), custom_marks[label_type], default_marks[label_type])
return color, mark
def _print_label_of_type(label_type, msg, **kwargs):
color, mark = _get_color_and_mark(label_type, kwargs)
_print_label(color, mark, msg, **kwargs)
def section(msg, **kwargs):
"""Display a section label containing the given message."""
_print_label_of_type('section', msg, **kwargs)
def item(msg, **kwargs):
"""Display an item label containing the given message."""
_print_label_of_type('item', msg, **kwargs)
def success(msg, **kwargs):
"""Display a success label containing the given message."""
_print_label_of_type('success', msg, **kwargs)
def warning(msg, **kwargs):
"""Display a warning label containing the given message."""
_print_label_of_type('warning', msg, **kwargs)
def error(msg, **kwargs):
"""Display an error label containing the given message."""
_print_label_of_type('error', msg, **kwargs)
def info(msg, **kwargs):
"""Display an info label containing the given message."""
_print_label_of_type('info', msg, **kwargs)
def progress(msg, mode=PROGRESS_STATIC, **kwargs):
"""Display a progress label containing the given message."""
color, mark = _get_color_and_mark('progress', kwargs)
_check_progress_mode(mode)
if mode == PROGRESS_STATIC:
return _print_label(color, mark, msg, **kwargs)
return ProgressLabel(mode, color, mark, msg, **kwargs)
def plain(msg, **kwargs):
"""Display a plain label containing the given message."""
_print_label_of_type('plain', msg, **kwargs)
def question(msg, **kwargs):
"""Display a question label containing the given message and prompt for user input."""
color, mark = _get_color_and_mark('question', kwargs)
return _input_label(color, mark, msg, **kwargs)
def input(msg, **kwargs):
"""Display an input label containing the given message and prompt for user input."""
color, mark = _get_color_and_mark('input', kwargs)
return _input_label(color, mark, msg, **kwargs)
def password(msg, **kwargs):
"""Display a password label containing the given message and prompt for user input."""
color, mark = _get_color_and_mark('password', kwargs)
_print_label(color, mark, msg, newline=False, **kwargs)
return getpass.getpass('')
def newline():
"""Print an empty line."""
_inline_write('\n')
if is_tty and platform.system() == 'Windows': # Initialize colorama on Windows.
import colorama
colorama.init()
__all__ = ['color_code', 'BLACK', 'RED', 'GREEN', 'YELLOW', 'BLUE', 'MAGENTA', 'CYAN', 'WHITE',
'BRIGHT_BLACK', 'BRIGHT_RED', 'BRIGHT_GREEN', 'BRIGHT_YELLOW', 'BRIGHT_BLUE', 'BRIGHT_MAGENTA',
'BRIGHT_CYAN', 'BRIGHT_WHITE', 'COLOR_RESET', 'PROGRESS_STATIC', 'PROGRESS_SPIN', 'PROGRESS_EXPAND',
'PROGRESS_MOVE', 'PROGRESS_DETERMINATE', 'config', 'section', 'item', 'success', 'warning', 'error',
'info', 'progress', 'plain', 'question', 'input', 'password', 'newline']
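# Usage sketch (not part of the original module): run this file directly to
# exercise a few label types and a determinate progress bar.
if __name__ == '__main__':
    section('colorlabels demo')
    info('Starting work...')
    with progress('Working ', mode=PROGRESS_DETERMINATE, width=30) as bar:
        for step in range(1, 11):
            time.sleep(0.05)
            bar.update(step / 10.0, ' {0}%'.format(step * 10))
    success('Done')
    warning('This is what a warning label looks like')
    error('...and this is an error label')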
|
custom_paramiko_expect.py
|
#
# Paramiko Expect
#
# Written by Fotis Gimian
# http://github.com/fgimian
#
# This library works with a Paramiko SSH channel to provide native SSH
# expect-like handling for servers. The library may be used to interact
# with commands like 'configure' or Cisco IOS devices or with interactive
# Unix scripts or commands.
#
# You must have Paramiko installed in order to use this library.
#
from __future__ import unicode_literals
import sys
import re
import socket
import struct
# Windows does not have termios
try:
import termios
import tty
has_termios = True
except ImportError: # pragma: no cover
import threading
has_termios = False
import select
def strip_ansi_codes(s):
return re.sub(r'\x1b\[([0-9,A-Z]{1,2}(;[0-9]{1,2})?(;[0-9]{3})?)?[m|K]?', '', s)
def default_output_func(msg):
sys.stdout.write(msg)
sys.stdout.flush()
class SSHClientInteraction(object):
"""
This class allows an expect-like interface to Paramiko which allows
coders to interact with applications and the shell of the connected
device.
:param client: A Paramiko SSHClient object
:param timeout: The connection timeout in seconds
:param newline: The newline character to send after each command
:param buffer_size: The amount of data (in bytes) that will be read at
a time after a command is run
:param display: Whether or not the output should be displayed in
real-time as it is being performed (especially useful
when debugging)
:param encoding: The character encoding to use.
"""
def __init__(
self, client, timeout=60, newline='\r', buffer_size=1024,
display=False, encoding='utf-8', output_callback=default_output_func,
tty_width=80, tty_height=24
):
self.channel = client.invoke_shell(width=tty_width, height=tty_height)
self.timeout = timeout
self.newline = newline
self.buffer_size = buffer_size
self.display = display
self.encoding = encoding
self.output_callback = output_callback
self.current_output = ''
self.current_output_clean = ''
self.current_send_string = ''
self.last_match = ''
def __del__(self):
self.close()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def close(self):
"""Attempts to close the channel for clean completion."""
try:
self.channel.close()
except Exception:
pass
def read_current(
self, timeout=None, strip_ansi=True
):
# Set the channel timeout
timeout = timeout if timeout else self.timeout
self.channel.settimeout(timeout)
# Create an empty output buffer
self.current_output = ''
# Read some of the output
current_buffer = self.channel.recv(self.buffer_size)
if len(current_buffer) == 0:
return -1
# Convert the buffer to our chosen encoding
current_buffer_decoded = current_buffer.decode(self.encoding)
# Strip all ugly \r (Ctrl-M making) characters from the current
# read
#current_buffer_decoded = current_buffer_decoded.replace('\b', '\b \b')
# Display the current buffer in realtime if requested to do so
# (good for debugging purposes)
if self.display:
            self.output_callback(current_buffer_decoded)
#if strip_ansi:
# current_buffer_decoded = strip_ansi_codes(current_buffer_decoded)
# Add the currently read buffer to the output
self.current_output += current_buffer_decoded
return self.current_output
def expect(
self, re_strings='', timeout=None, output_callback=None, default_match_prefix='.*\n',
strip_ansi=True
):
"""
This function takes in a regular expression (or regular expressions)
that represent the last line of output from the server. The function
waits for one or more of the terms to be matched. The regexes are
matched using expression \n<regex>$ so you'll need to provide an
easygoing regex such as '.*server.*' if you wish to have a fuzzy
match.
:param re_strings: Either a regex string or list of regex strings
that we should expect; if this is not specified,
then EOF is expected (i.e. the shell is completely
closed after the exit command is issued)
:param timeout: Timeout in seconds. If this timeout is exceeded,
then an exception is raised.
:param output_callback: A function used to print ssh output. Printed to stdout
by default. A user-defined logger may be passed like
output_callback=lambda m: mylog.debug(m)
:param default_match_prefix: A prefix to all match regexes, defaults to '.*\n',
can set to '' on cases prompt is the first line,
or the command has no output.
        :param strip_ansi: If True, strip ANSI control characters before regex
                           matching. Defaults to True.
        :return: An EOF returns -1, a regex match returns 0 and a match in a
list of regexes returns the index of the matched string in
the list.
:raises: A socket.timeout exception is raised on timeout.
"""
output_callback = output_callback if output_callback else self.output_callback
# Set the channel timeout
timeout = timeout if timeout else self.timeout
self.channel.settimeout(timeout)
# Create an empty output buffer
self.current_output = ''
# This function needs all regular expressions to be in the form of a
# list, so if the user provided a string, let's convert it to a 1
# item list.
if isinstance(re_strings, str) and len(re_strings) != 0:
re_strings = [re_strings]
# Loop until one of the expressions is matched or loop forever if
# nothing is expected (usually used for exit)
while (
len(re_strings) == 0 or
not [re_string
for re_string in re_strings
if re.match(default_match_prefix + re_string + '$',
self.current_output, re.DOTALL)]
):
# Read some of the output
current_buffer = self.channel.recv(self.buffer_size)
# If we have an empty buffer, then the SSH session has been closed
if len(current_buffer) == 0:
break
# Convert the buffer to our chosen encoding
current_buffer_decoded = current_buffer.decode(self.encoding)
# Strip all ugly \r (Ctrl-M making) characters from the current
# read
current_buffer_decoded = current_buffer_decoded.replace('\r', '')
# Display the current buffer in realtime if requested to do so
# (good for debugging purposes)
if self.display:
output_callback(current_buffer_decoded)
if strip_ansi:
current_buffer_decoded = strip_ansi_codes(current_buffer_decoded)
# Add the currently read buffer to the output
self.current_output += current_buffer_decoded
# Grab the first pattern that was matched
if len(re_strings) != 0:
found_pattern = [(re_index, re_string)
for re_index, re_string in enumerate(re_strings)
if re.match(default_match_prefix + re_string + '$',
self.current_output, re.DOTALL)]
# Clean the output up by removing the sent command
self.current_output_clean = self.current_output
if len(self.current_send_string) != 0:
self.current_output_clean = (
self.current_output_clean.replace(
self.current_send_string + '\n', ''
)
)
# Reset the current send string to ensure that multiple expect calls
# don't result in bad output cleaning
self.current_send_string = ''
# Clean the output up by removing the expect output from the end if
# requested and save the details of the matched pattern
if len(re_strings) != 0 and len(found_pattern) != 0:
self.current_output_clean = (
re.sub(
found_pattern[0][1] + '$', '', self.current_output_clean
)
)
self.last_match = found_pattern[0][1]
return found_pattern[0][0]
else:
# We would socket timeout before getting here, but for good
# measure, let's send back a -1
return -1
    def send(self, send_string, newline=None):
        """Saves and sends the send string provided, followed by a newline."""
        self.current_send_string = send_string
        newline = newline if newline is not None else self.newline
        self.channel.send(send_string + newline)
def tail(
self, line_prefix=None, callback=None, output_callback=None, stop_callback=lambda x: False,
timeout=None
):
"""
This function takes control of an SSH channel and displays line
        by line of output as \n is received. This function is specifically
made for tail-like commands.
:param line_prefix: Text to append to the left of each line of output.
This is especially useful if you are using my
MultiSSH class to run tail commands over multiple
servers.
:param callback: You may optionally supply a callback function which
                         takes two parameters. The first is the line prefix
and the second is current line of output. The
callback should return the string that is to be
displayed (including the \n character). This allows
users to grep the output or manipulate it as
required.
:param output_callback: A function used to print ssh output. Printed to stdout
by default. A user-defined logger may be passed like
output_callback=lambda m: mylog.debug(m)
        :param stop_callback: A function used to stop the tail; when the function
                              returns True the tail will stop. By default
                              stop_callback=lambda x: False
        :param timeout: how much time to wait for data, defaults to None which
                        means almost forever.
"""
output_callback = output_callback if output_callback else self.output_callback
# Set the channel timeout to the maximum integer the server allows,
# setting this to None breaks the KeyboardInterrupt exception and
        # won't allow us to Ctrl+C out of the script
timeout = timeout if timeout else 2 ** (struct.Struct(str('i')).size * 8 - 1) - 1
self.channel.settimeout(timeout)
# Create an empty line buffer and a line counter
current_line = b''
line_counter = 0
line_feed_byte = '\n'.encode(self.encoding)
# Loop forever, Ctrl+C (KeyboardInterrupt) is used to break the tail
while True:
# Read the output one byte at a time so we can detect \n correctly
buffer = self.channel.recv(1)
# If we have an empty buffer, then the SSH session has been closed
if len(buffer) == 0:
break
# Add the currently read buffer to the current line output
current_line += buffer
# Display the last read line in realtime when we reach a \n
# character
if buffer == line_feed_byte:
current_line_decoded = current_line.decode(self.encoding)
if line_counter:
if callback:
output_callback(callback(line_prefix, current_line_decoded))
else:
if line_prefix:
output_callback(line_prefix)
output_callback(current_line_decoded)
if stop_callback(current_line_decoded):
break
line_counter += 1
current_line = b''
def take_control(self):
"""
This function is a better documented and touched up version of the
posix_shell function found in the interactive.py demo script that
ships with Paramiko.
"""
if has_termios:
# Get attributes of the shell you were in before going to the
# new one
original_tty = termios.tcgetattr(sys.stdin)
try:
tty.setraw(sys.stdin.fileno())
tty.setcbreak(sys.stdin.fileno())
# We must set the timeout to 0 so that we can bypass times when
# there is no available text to receive
self.channel.settimeout(0)
# Loop forever until the user exits (i.e. read buffer is empty)
while True:
select_read, select_write, select_exception = (
select.select([self.channel, sys.stdin], [], [])
)
# Read any output from the terminal and print it to the
                    # screen. With timeout set to 0, we can just ignore times
# when there's nothing to receive.
if self.channel in select_read:
try:
buffer = self.channel.recv(self.buffer_size)
if len(buffer) == 0:
break
sys.stdout.write(buffer.decode(self.encoding))
sys.stdout.flush()
except socket.timeout:
pass
# Send any keyboard input to the terminal one byte at a
# time
if sys.stdin in select_read:
buffer = sys.stdin.read(1)
if len(buffer) == 0:
break
self.channel.send(buffer)
finally:
# Restore the attributes of the shell you were in
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, original_tty)
else:
def writeall(sock):
while True:
buffer = sock.recv(self.buffer_size)
if len(buffer) == 0:
break
sys.stdout.write(buffer.decode(self.encoding))
sys.stdout.flush()
writer = threading.Thread(target=writeall, args=(self.channel,))
writer.start()
try:
while True:
buffer = sys.stdin.read(1)
if len(buffer) == 0:
break
self.channel.send(buffer)
# User has hit Ctrl+Z or F6
except EOFError:
pass
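# --------------------------------------------------------------------------
# Usage sketch (not part of the original module). Host, credentials and the
# prompt regex below are assumptions; adapt them to the device or shell you
# are driving.
if __name__ == '__main__':  # pragma: no cover
    import paramiko

    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect('192.0.2.10', username='admin', password='secret')  # assumed host

    with SSHClientInteraction(ssh, timeout=10, display=True) as interact:
        prompt = r'.*\$\s*'          # assumed shell prompt pattern
        interact.expect(prompt)      # wait for the first prompt
        interact.send('uname -a')
        interact.expect(prompt)
        print(interact.current_output_clean)
        interact.send('exit')
        interact.expect()            # wait for the channel to close (EOF)
    ssh.close()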
|
uvcal.py
|
# -*- mode: python; coding: utf-8 -*-
# Copyright (c) 2018 Radio Astronomy Software Group
# Licensed under the 2-clause BSD License
"""Primary container for radio interferometer calibration solutions."""
import copy
import numpy as np
import threading
import warnings
from ..uvbase import UVBase
from .. import parameter as uvp
from .. import telescopes as uvtel
from .. import utils as uvutils
__all__ = ["UVCal"]
class UVCal(UVBase):
"""
A class defining calibration solutions.
Currently supported file types: calfits
Attributes
----------
UVParameter objects: For full list see UVCal Parameters
(http://pyuvdata.readthedocs.io/en/latest/uvcal_parameters.html).
Some are always required, some are required for certain cal_types
and others are always optional.
"""
def __init__(self):
self._Nfreqs = uvp.UVParameter(
"Nfreqs", description="Number of frequency channels", expected_type=int
)
self._Njones = uvp.UVParameter(
"Njones",
description="Number of Jones calibration"
"parameters (Number of Jones matrix elements "
"calculated in calibration).",
expected_type=int,
)
desc = (
"Number of times with different calibrations calculated "
"(if a calibration is calculated over a range of integrations, "
"this gives the number of separate calibrations along the time axis)."
)
self._Ntimes = uvp.UVParameter("Ntimes", description=desc, expected_type=int)
self._history = uvp.UVParameter(
"history",
description="String of history, units English",
form="str",
expected_type=str,
)
self._Nspws = uvp.UVParameter(
"Nspws",
description="Number of spectral windows "
"(ie non-contiguous spectral chunks). "
"More than one spectral window is not "
"currently supported.",
expected_type=int,
)
desc = (
"Time range (in JD) that cal solutions are valid for."
"list: [start_time, end_time] in JD. Should only be set in Ntimes is 1."
)
self._time_range = uvp.UVParameter(
"time_range", description=desc, form=2, expected_type=float, required=False
)
desc = "Name of telescope. e.g. HERA. String."
self._telescope_name = uvp.UVParameter(
"telescope_name", description=desc, form="str", expected_type=str
)
desc = (
"Telescope location: xyz in ITRF (earth-centered frame). "
"Can also be accessed using telescope_location_lat_lon_alt or "
"telescope_location_lat_lon_alt_degrees properties"
)
self._telescope_location = uvp.LocationParameter(
"telescope_location",
description=desc,
acceptable_range=(6.35e6, 6.39e6),
tols=1e-3,
required=False,
)
desc = (
"Number of antennas that have data associated with them "
"(i.e. length of ant_array), which may be smaller than the number"
"of antennas in the telescope (i.e. length of antenna_numbers)."
)
self._Nants_data = uvp.UVParameter(
"Nants_data", description=desc, expected_type=int
)
desc = (
"Number of antennas in the antenna_numbers array. May be larger "
"than the number of antennas with gains associated with them."
)
self._Nants_telescope = uvp.UVParameter(
"Nants_telescope", description=desc, expected_type=int
)
desc = (
"Array of integer antenna numbers that appear in self.gain_array,"
" with shape (Nants_data,). "
"This array is ordered to match the inherent ordering of the zeroth"
" axis of self.gain_array."
)
self._ant_array = uvp.UVParameter(
"ant_array", description=desc, expected_type=int, form=("Nants_data",)
)
desc = (
"Array of antenna names with shape (Nants_telescope,). "
"Ordering of elements matches ordering of antenna_numbers."
)
self._antenna_names = uvp.UVParameter(
"antenna_names",
description=desc,
form=("Nants_telescope",),
expected_type=str,
)
desc = (
"Array of all integer-valued antenna numbers in the telescope with "
"shape (Nants_telescope,). Ordering of elements matches that of "
"antenna_names. This array is not necessarily identical to "
"ant_array, in that this array holds all antenna numbers "
"associated with the telescope, not just antennas with data, and "
"has an in principle non-specific ordering."
)
self._antenna_numbers = uvp.UVParameter(
"antenna_numbers",
description=desc,
form=("Nants_telescope",),
expected_type=int,
)
desc = (
"Array giving coordinates of antennas relative to "
"telescope_location (ITRF frame), shape (Nants_telescope, 3), "
"units meters. See the tutorial page in the documentation "
"for an example of how to convert this to topocentric frame."
)
self._antenna_positions = uvp.UVParameter(
"antenna_positions",
description=desc,
form=("Nants_telescope", 3),
expected_type=float,
tols=1e-3, # 1 mm
required=False,
)
self._spw_array = uvp.UVParameter(
"spw_array",
description="Array of spectral window numbers, shape (Nspws).",
form=("Nspws",),
expected_type=int,
)
desc = (
"Array of frequencies, center of the channel, "
"shape (Nspws, Nfreqs), units Hz."
)
self._freq_array = uvp.UVParameter(
"freq_array",
description=desc,
form=("Nspws", "Nfreqs"),
expected_type=float,
tols=1e-3,
) # mHz
desc = "Channel width of of a frequency bin. Units Hz."
self._channel_width = uvp.UVParameter(
"channel_width", description=desc, expected_type=float, tols=1e-3
)
desc = (
"Array of antenna polarization integers, shape (Njones). "
"linear pols -5:-8 (jxx, jyy, jxy, jyx)."
"circular pols -1:-4 (jrr, jll. jrl, jlr)."
)
self._jones_array = uvp.UVParameter(
"jones_array",
description=desc,
expected_type=int,
acceptable_vals=list(np.arange(-8, 0)),
form=("Njones",),
)
desc = (
"Array of calibration solution times, center of integration, "
"shape (Ntimes), units Julian Date"
)
self._time_array = uvp.UVParameter(
"time_array",
description=desc,
form=("Ntimes",),
expected_type=float,
tols=1e-3 / (60.0 * 60.0 * 24.0),
)
# standard angle tolerance: 10 mas in radians.
# Should perhaps be decreased to 1 mas in the future
radian_tol = 10 * 2 * np.pi * 1e-3 / (60.0 * 60.0 * 360.0)
desc = "Array of lsts, center of integration, shape (Ntimes), units radians"
self._lst_array = uvp.UVParameter(
"lst_array",
description=desc,
form=("Ntimes",),
expected_type=float,
tols=radian_tol,
required=False,
)
desc = "Integration time of a time bin, units seconds."
self._integration_time = uvp.UVParameter(
"integration_time", description=desc, expected_type=float, tols=1e-3
) # 1ms
desc = (
"The convention for applying the calibration solutions to data."
'Values are "divide" or "multiply", indicating that to calibrate '
"one should divide or multiply uncalibrated data by gains. "
"Mathematically this indicates the alpha exponent in the equation: "
"calibrated data = gain^alpha * uncalibrated data. A value of "
'"divide" represents alpha=-1 and "multiply" represents alpha=1.'
)
self._gain_convention = uvp.UVParameter(
"gain_convention",
form="str",
expected_type=str,
description=desc,
acceptable_vals=["divide", "multiply"],
)
desc = (
"Array of flags to be applied to calibrated data (logical OR "
"of input and flag generated by calibration). True is flagged. "
"Shape: (Nants_data, Nspws, Nfreqs, Ntimes, Njones), type = bool."
)
self._flag_array = uvp.UVParameter(
"flag_array",
description=desc,
form=("Nants_data", "Nspws", "Nfreqs", "Ntimes", "Njones"),
expected_type=bool,
)
desc = (
"Array of qualities of calibration solutions. "
"The shape depends on cal_type, if the cal_type is 'gain' or "
"'unknown', the shape is: (Nants_data, Nspws, Nfreqs, Ntimes, Njones), "
"if the cal_type is 'delay', the shape is "
"(Nants_data, Nspws, 1, Ntimes, Njones). The type is float."
)
self._quality_array = uvp.UVParameter(
"quality_array",
description=desc,
form=("Nants_data", "Nspws", "Nfreqs", "Ntimes", "Njones"),
expected_type=float,
)
desc = (
"Orientation of the physical dipole corresponding to what is "
'labelled as the x polarization. Options are "east" '
'(indicating east/west orientation) and "north" (indicating '
"north/south orientation)"
)
self._x_orientation = uvp.UVParameter(
"x_orientation",
description=desc,
expected_type=str,
acceptable_vals=["east", "north"],
)
# --- cal_type parameters ---
desc = "cal type parameter. Values are delay, gain or unknown."
self._cal_type = uvp.UVParameter(
"cal_type",
form="str",
expected_type=str,
value="unknown",
description=desc,
acceptable_vals=["delay", "gain", "unknown"],
)
desc = (
'Required if cal_type = "gain". Array of gains, '
"shape: (Nants_data, Nspws, Nfreqs, Ntimes, Njones), type = complex float."
)
self._gain_array = uvp.UVParameter(
"gain_array",
description=desc,
required=False,
form=("Nants_data", "Nspws", "Nfreqs", "Ntimes", "Njones"),
expected_type=complex,
)
desc = (
'Required if cal_type = "delay". Array of delays with units of seconds. '
"Shape: (Nants_data, Nspws, 1, Ntimes, Njones), type = float."
)
self._delay_array = uvp.UVParameter(
"delay_array",
description=desc,
required=False,
form=("Nants_data", "Nspws", 1, "Ntimes", "Njones"),
expected_type=float,
)
desc = (
"Required if cal_type = 'delay'. Frequency range that solutions "
"are valid for. list: [start_frequency, end_frequency] in Hz."
)
self._freq_range = uvp.UVParameter(
"freq_range",
required=False,
description=desc,
form=2,
expected_type=float,
tols=1e-3,
)
# --- cal_style parameters ---
desc = "Style of calibration. Values are sky or redundant."
self._cal_style = uvp.UVParameter(
"cal_style",
form="str",
expected_type=str,
description=desc,
acceptable_vals=["sky", "redundant"],
)
desc = (
'Required if cal_style = "sky". Short string describing field '
"center or dominant source."
)
self._sky_field = uvp.UVParameter(
"sky_field", form="str", required=False, expected_type=str, description=desc
)
desc = 'Required if cal_style = "sky". Name of calibration catalog.'
self._sky_catalog = uvp.UVParameter(
"sky_catalog",
form="str",
required=False,
expected_type=str,
description=desc,
)
desc = 'Required if cal_style = "sky". Phase reference antenna.'
self._ref_antenna_name = uvp.UVParameter(
"ref_antenna_name",
form="str",
required=False,
expected_type=str,
description=desc,
)
desc = "Number of sources used."
self._Nsources = uvp.UVParameter(
"Nsources", required=False, expected_type=int, description=desc
)
desc = "Range of baselines used for calibration."
self._baseline_range = uvp.UVParameter(
"baseline_range",
form=2,
required=False,
expected_type=float,
description=desc,
)
desc = "Name of diffuse model."
self._diffuse_model = uvp.UVParameter(
"diffuse_model",
form="str",
required=False,
expected_type=str,
description=desc,
)
# --- truly optional parameters ---
desc = (
"The gain scale of the calibration, which indicates the units of the "
"calibrated visibilities. For example, Jy or K str."
)
self._gain_scale = uvp.UVParameter(
"gain_scale",
form="str",
expected_type=str,
description=desc,
required=False,
)
desc = (
"Array of input flags, True is flagged. shape: (Nants_data, Nspws, "
"Nfreqs, Ntimes, Njones), type = bool."
)
self._input_flag_array = uvp.UVParameter(
"input_flag_array",
description=desc,
required=False,
form=("Nants_data", "Nspws", "Nfreqs", "Ntimes", "Njones"),
expected_type=bool,
)
desc = "Origin (on github for e.g) of calibration software. Url and branch."
self._git_origin_cal = uvp.UVParameter(
"git_origin_cal",
form="str",
expected_type=str,
description=desc,
required=False,
)
desc = (
"Commit hash of calibration software (from git_origin_cal) used "
"to generate solutions."
)
self._git_hash_cal = uvp.UVParameter(
"git_hash_cal",
form="str",
expected_type=str,
description=desc,
required=False,
)
desc = "Name of observer who calculated solutions in this file."
self._observer = uvp.UVParameter(
"observer", form="str", description=desc, expected_type=str, required=False
)
desc = (
"Array of qualities of the calibration for entire arrays. "
'The shape depends on cal_type, if the cal_type is "gain" or '
'"unknown", the shape is: (Nspws, Nfreqs, Ntimes, Njones), '
'if the cal_type is "delay", the shape is (Nspws, 1, Ntimes, Njones), '
"type = float."
)
self._total_quality_array = uvp.UVParameter(
"total_quality_array",
description=desc,
form=("Nspws", "Nfreqs", "Ntimes", "Njones"),
expected_type=float,
required=False,
)
desc = (
"Any user supplied extra keywords, type=dict. Keys should be "
"8 character or less strings if writing to calfits files. "
'Use the special key "comment" for long multi-line string comments.'
)
self._extra_keywords = uvp.UVParameter(
"extra_keywords",
required=False,
description=desc,
value={},
spoof_val={},
expected_type=dict,
)
desc = (
"List of strings containing the unique basenames (not the full path) of "
"input files."
)
self._filename = uvp.UVParameter(
"filename", required=False, description=desc, expected_type=str,
)
super(UVCal, self).__init__()
def _set_gain(self):
"""Set cal_type to 'gain' and adjust required parameters."""
self.cal_type = "gain"
self._gain_array.required = True
self._delay_array.required = False
self._freq_range.required = False
self._quality_array.form = self._gain_array.form
self._total_quality_array.form = self._gain_array.form[1:]
def _set_delay(self):
"""Set cal_type to 'delay' and adjust required parameters."""
self.cal_type = "delay"
self._gain_array.required = False
self._delay_array.required = True
self._freq_range.required = True
self._quality_array.form = self._delay_array.form
self._total_quality_array.form = self._delay_array.form[1:]
def _set_unknown_cal_type(self):
"""Set cal_type to 'unknown' and adjust required parameters."""
self.cal_type = "unknown"
self._gain_array.required = False
self._delay_array.required = False
self._freq_range.required = False
self._quality_array.form = self._gain_array.form
self._total_quality_array.form = self._gain_array.form[1:]
def _set_sky(self):
"""Set cal_style to 'sky' and adjust required parameters."""
self.cal_style = "sky"
self._sky_field.required = True
self._sky_catalog.required = True
self._ref_antenna_name.required = True
def _set_redundant(self):
"""Set cal_style to 'redundant' and adjust required parameters."""
self.cal_style = "redundant"
self._sky_field.required = False
self._sky_catalog.required = False
self._ref_antenna_name.required = False
@property
def _data_params(self):
"""List of strings giving the data-like parameters."""
return [
"gain_array",
"delay_array",
"flag_array",
"quality_array",
"total_quality_array",
"input_flag_array",
]
@property
def data_like_parameters(self):
"""Iterate defined parameters which are data-like (not metadata-like)."""
for key in self._data_params:
if hasattr(self, key):
yield getattr(self, key)
@property
def metadata_only(self):
"""
Property that determines whether this is a metadata only object.
        An object is metadata only if all of the data-like parameters (e.g.
        gain_array or delay_array, flag_array, quality_array) are None.
"""
metadata_only = all(d is None for d in self.data_like_parameters)
cal_type = self._cal_type.value
if cal_type is None:
cal_type = "unknown"
required_params = {
"gain": ["gain_array", "flag_array", "quality_array"],
"delay": ["delay_array", "flag_array", "quality_array"],
"unknown": ["flag_array", "quality_array"],
}
for param_name in self._data_params:
if param_name in required_params[cal_type]:
getattr(self, "_" + param_name).required = not metadata_only
return metadata_only
def set_telescope_params(self, overwrite=False):
"""
Set telescope related parameters.
If the telescope_name is in the known_telescopes, set the telescope
location to the value for the known telescope. Also set the antenna positions
if they are not set on the object and are available for the telescope.
Parameters
----------
overwrite : bool
Option to overwrite existing telescope-associated parameters with
the values from the known telescope.
Raises
------
ValueError
if the telescope_name is not in known telescopes
"""
telescope_obj = uvtel.get_telescope(self.telescope_name)
if telescope_obj is not False:
if self.telescope_location is None or overwrite is True:
warnings.warn(
"telescope_location is not set. Using known values "
f"for {telescope_obj.telescope_name}."
)
self.telescope_location = telescope_obj.telescope_location
if telescope_obj.antenna_positions is not None and (
self.antenna_positions is None or overwrite is True
):
ant_inds = []
telescope_ant_inds = []
# first try to match using names only
for index, antname in enumerate(self.antenna_names):
if antname in telescope_obj.antenna_names:
ant_inds.append(index)
telescope_ant_inds.append(
np.where(telescope_obj.antenna_names == antname)[0][0]
)
# next try using numbers
if len(ant_inds) != self.Nants_telescope:
for index, antnum in enumerate(self.antenna_numbers):
# only update if not already found
if (
index not in ant_inds
and antnum in telescope_obj.antenna_numbers
):
this_ant_ind = np.where(
telescope_obj.antenna_numbers == antnum
)[0][0]
# make sure we don't already have this antenna associated
# with another antenna
if this_ant_ind not in telescope_ant_inds:
ant_inds.append(index)
telescope_ant_inds.append(this_ant_ind)
if len(ant_inds) != self.Nants_telescope:
warnings.warn(
"Not all antennas have positions in the telescope object. "
"Not setting antenna_positions."
)
else:
warnings.warn(
"antenna_positions is not set. Using known values "
f"for {telescope_obj.telescope_name}."
)
telescope_ant_inds = np.array(telescope_ant_inds)
self.antenna_positions = telescope_obj.antenna_positions[
telescope_ant_inds, :
]
else:
raise ValueError(
f"Telescope {self.telescope_name} is not in known_telescopes."
)
def _set_lsts_helper(self):
latitude, longitude, altitude = self.telescope_location_lat_lon_alt_degrees
unique_times, inverse_inds = np.unique(self.time_array, return_inverse=True)
unique_lst_array = uvutils.get_lst_for_time(
unique_times, latitude, longitude, altitude
)
self.lst_array = unique_lst_array[inverse_inds]
return
def set_lsts_from_time_array(self, background=False):
"""Set the lst_array based from the time_array.
Parameters
----------
background : bool, False
When set to True, start the calculation on a threading.Thread in the
background and return the thread to the user.
Returns
-------
proc : None or threading.Thread instance
When background is set to True, a thread is returned which must be
            joined before the lst_array exists on the UVCal object.
"""
if not background:
self._set_lsts_helper()
return
else:
proc = threading.Thread(target=self._set_lsts_helper)
proc.start()
return proc
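    # Illustrative note (not library code): with background=True the caller must
    # join the returned thread before relying on lst_array, e.g. for a UVCal
    # instance ``cal``:
    #   proc = cal.set_lsts_from_time_array(background=True)
    #   ...  # do other work while the LSTs are computed
    #   proc.join()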
def check(self, check_extra=True, run_check_acceptability=True):
"""
Add some extra checks on top of checks on UVBase class.
Check that required parameters exist. Check that parameters have
appropriate shapes and optionally that the values are acceptable.
Parameters
----------
check_extra : bool
If true, check all parameters, otherwise only check required parameters.
run_check_acceptability : bool
Option to check if values in parameters are acceptable.
Returns
-------
bool
True if check passes
Raises
------
ValueError
if parameter shapes or types are wrong or do not have acceptable
values (if run_check_acceptability is True)
"""
# Make sure requirements are set properly for cal_style
if self.cal_style == "sky":
self._set_sky()
elif self.cal_style == "redundant":
self._set_redundant()
# If the telescope location is not set issue a deprecation warning
if self.telescope_location is None:
warnings.warn(
"The telescope_location is not set. It will be a required "
"parameter starting in pyuvdata version 2.3",
category=DeprecationWarning,
)
# If the antenna positions parameter is not set issue a deprecation warning
if self.antenna_positions is None:
warnings.warn(
"The antenna_positions parameter is not set. It will be a required "
"parameter starting in pyuvdata version 2.3",
category=DeprecationWarning,
)
        # If the lst_array is not set issue a deprecation warning
if self.lst_array is None:
warnings.warn(
"The lst_array is not set. It will be a required "
"parameter starting in pyuvdata version 2.3",
category=DeprecationWarning,
)
# first run the basic check from UVBase
super(UVCal, self).check(
check_extra=check_extra, run_check_acceptability=run_check_acceptability
)
# require that all entries in ant_array exist in antenna_numbers
if not all(ant in self.antenna_numbers for ant in self.ant_array):
raise ValueError("All antennas in ant_array must be in antenna_numbers.")
# issue warning if extra_keywords keys are longer than 8 characters
for key in self.extra_keywords.keys():
if len(key) > 8:
warnings.warn(
"key {key} in extra_keywords is longer than 8 "
"characters. It will be truncated to 8 if written "
"to a calfits file format.".format(key=key)
)
# issue warning if extra_keywords values are lists, arrays or dicts
for key, value in self.extra_keywords.items():
if isinstance(value, (list, dict, np.ndarray)):
warnings.warn(
"{key} in extra_keywords is a list, array or dict, "
"which will raise an error when writing calfits "
"files".format(key=key)
)
return True
def copy(self, metadata_only=False):
"""
Make and return a copy of the UVCal object.
Parameters
----------
metadata_only : bool
If True, only copy the metadata of the object.
Returns
-------
UVCal
Copy of self.
"""
if not metadata_only:
return super(UVCal, self).copy()
else:
uv = UVCal()
# include all attributes, not just UVParameter ones.
for attr in self.__iter__(uvparams_only=False):
# skip properties
if isinstance(getattr(type(self), attr, None), property):
continue
# skip data like parameters
# parameter names have a leading underscore we want to ignore
if attr.lstrip("_") in self._data_params:
continue
setattr(uv, attr, copy.deepcopy(getattr(self, attr)))
return uv
def _has_key(self, antnum=None, jpol=None):
"""
Check if this UVCal has the requested antenna or polarization.
Parameters
----------
antnum : int
Antenna number to check.
jpol : str or int
Antenna polarization string or integer to check.
Returns
-------
bool
Boolean indicator of whether the antenna and/or antenna
polarization is present on this object.
"""
if antnum is not None:
if antnum not in self.ant_array:
return False
if jpol is not None:
if isinstance(jpol, (str, np.str_)):
jpol = uvutils.jstr2num(jpol, x_orientation=self.x_orientation)
if jpol not in self.jones_array:
return False
return True
def ant2ind(self, antnum):
"""
Get the index in data arrays for an antenna number.
Parameters
----------
antnum : int
Antenna number to get index for.
Returns
-------
int
Antenna index in data arrays.
"""
if not self._has_key(antnum=antnum):
raise ValueError("{} not found in ant_array".format(antnum))
return np.argmin(np.abs(self.ant_array - antnum))
def jpol2ind(self, jpol):
"""
Get the index in data arrays for an antenna polarization.
Parameters
----------
jpol : int or str
Antenna polarization to get index for.
Returns
-------
int
Antenna polarization index in data arrays
"""
if isinstance(jpol, (str, np.str_)):
jpol = uvutils.jstr2num(jpol, x_orientation=self.x_orientation)
if not self._has_key(jpol=jpol):
raise ValueError("{} not found in jones_array".format(jpol))
return np.argmin(np.abs(self.jones_array - jpol))
def _slice_array(self, key, data_array, squeeze_pol=True):
"""
Slice a data array given a data key.
Parameters
----------
key : int or length 2 tuple of ints or int and str
Antenna or antenna and polarization to get slice for. If it's a length
2 tuple, the second value must be an antenna polarization int or string
parsable by jpol2ind.
data_array : :class: numpy ndarray
Array to get slice of. Must have the shape of the gain_array or delay_array.
squeeze_pol : bool
Option to squeeze pol dimension if possible.
Returns
-------
:class: numpy ndarray
Slice of the data_array for the key.
"""
key = uvutils._get_iterable(key)
if len(key) == 1:
# interpret as a single antenna
output = data_array[self.ant2ind(key[0]), 0, :, :, :]
if squeeze_pol and output.shape[-1] == 1:
output = output[:, :, 0]
return output
elif len(key) == 2:
# interpret as an antenna-pol pair
return data_array[self.ant2ind(key[0]), 0, :, :, self.jpol2ind(key[1])]
def _parse_key(self, ant, jpol=None):
"""
Parse key inputs and return a standard antenna-polarization key.
Parameters
----------
ant : int or length 2 tuple of ints or int and str
Antenna or antenna and polarization to get key for. If it's a length
2 tuple, the second value must be an antenna polarization int or string
parsable by jpol2ind.
jpol : int or str
Antenna polarization int or string parsable by jpol2ind. Only used
if `ant` is an integer.
Returns
-------
tuple
Standard key tuple.
"""
if isinstance(ant, (list, tuple)):
# interpret ant as (ant,) or (ant, jpol)
key = tuple(ant)
elif isinstance(ant, (int, np.integer)):
# interpret ant as antenna number
key = (ant,)
# add jpol if fed
if jpol is not None:
key += (jpol,)
return key
def get_gains(self, ant, jpol=None, squeeze_pol=True):
"""
Get the gain associated with an antenna and/or polarization.
Parameters
----------
ant : int or length 2 tuple of ints or int and str
Antenna or antenna and polarization to get gains for. If it's a length
2 tuple, the second value must be an antenna polarization int or string
parsable by jpol2ind.
jpol : int or str, optional
Instrumental polarization to request. Ex. 'Jxx'
squeeze_pol : bool
Option to squeeze pol dimension if possible.
Returns
-------
complex ndarray
            Gain solution of shape (Nfreqs, Ntimes, Njones) or (Nfreqs, Ntimes)
if jpol is set or if squeeze_pol is True and Njones = 1.
"""
if self.cal_type != "gain":
raise ValueError("cal_type must be 'gain' for get_gains() method")
return self._slice_array(
self._parse_key(ant, jpol=jpol), self.gain_array, squeeze_pol=squeeze_pol
)
def get_flags(self, ant, jpol=None, squeeze_pol=True):
"""
Get the flags associated with an antenna and/or polarization.
Parameters
----------
ant : int or length 2 tuple of ints or int and str
Antenna or antenna and polarization to get gains for. If it's a length
2 tuple, the second value must be an antenna polarization int or string
parsable by jpol2ind.
jpol : int or str, optional
Instrumental polarization to request. Ex. 'Jxx'
squeeze_pol : bool
Option to squeeze pol dimension if possible.
Returns
-------
boolean ndarray
            Flags of shape (Nfreqs, Ntimes, Njones) or (Nfreqs, Ntimes)
if jpol is set or if squeeze_pol is True and Njones = 1.
"""
return self._slice_array(
self._parse_key(ant, jpol=jpol), self.flag_array, squeeze_pol=squeeze_pol
)
def get_quality(self, ant, jpol=None, squeeze_pol=True):
"""
Get the qualities associated with an antenna and/or polarization.
Parameters
----------
ant : int or length 2 tuple of ints or int and str
Antenna or antenna and polarization to get gains for. If it's a length
2 tuple, the second value must be an antenna polarization int or string
parsable by jpol2ind.
jpol : int or str, optional
Instrumental polarization to request. Ex. 'Jxx'
squeeze_pol : bool
Option to squeeze pol dimension if possible.
Returns
-------
float ndarray
            Qualities of shape (Nfreqs, Ntimes, Njones) or (Nfreqs, Ntimes)
if jpol is not None or if squeeze_pol is True and Njones = 1.
"""
return self._slice_array(
self._parse_key(ant, jpol=jpol), self.quality_array, squeeze_pol=squeeze_pol
)
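    # Illustrative note (not library code): the getters above accept either a bare
    # antenna number or an (antenna, jones) pair; the antenna numbers and Jones
    # strings below are placeholders. For a gain-type UVCal instance ``cal``:
    #   cal.get_gains(9)           # shape (Nfreqs, Ntimes, Njones), squeezed if Njones == 1
    #   cal.get_gains(9, "Jxx")    # shape (Nfreqs, Ntimes)
    #   cal.get_flags((9, "Jxx"))  # flags with the same shape as the matching gains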
def convert_to_gain(
self,
delay_convention="minus",
run_check=True,
check_extra=True,
run_check_acceptability=True,
):
"""
Convert non-gain cal_types to gains.
For the delay cal_type the gain is calculated as:
gain = 1 * exp((+/-) * 2 * pi * j * delay * frequency)
where the (+/-) is dictated by the delay_convention
Parameters
----------
delay_convention : str
Exponent sign to use in the conversion, can be "plus" or "minus".
run_check : bool
Option to check for the existence and proper shapes of parameters
after converting.
check_extra : bool
Option to check optional parameters as well as required ones.
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
converting.
"""
if self.cal_type == "gain":
raise ValueError("The data is already a gain cal_type.")
elif self.cal_type == "delay":
if delay_convention == "minus":
conv = -1
elif delay_convention == "plus":
conv = 1
else:
raise ValueError('delay_convention can only be "minus" or "plus"')
self.history += " Converted from delays to gains using pyuvdata."
phase_array = np.zeros(
(self.Nants_data, self.Nspws, self.Nfreqs, self.Ntimes, self.Njones)
)
for si in range(self.Nspws):
temp = (
conv
* 2
* np.pi
* np.dot(
self.delay_array[:, si, 0, :, :, np.newaxis],
self.freq_array[si, np.newaxis, :],
)
)
temp = np.transpose(temp, (0, 3, 1, 2))
phase_array[:, si, :, :, :] = temp
gain_array = np.exp(1j * phase_array)
new_quality = np.repeat(
self.quality_array[:, :, :, :, :], self.Nfreqs, axis=2
)
self._set_gain()
self.gain_array = gain_array
self.quality_array = new_quality
self.delay_array = None
if self.total_quality_array is not None:
new_total_quality_array = np.repeat(
self.total_quality_array[:, :, :, :], self.Nfreqs, axis=1
)
self.total_quality_array = new_total_quality_array
# check if object is self-consistent
if run_check:
self.check(
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
)
else:
raise ValueError("cal_type is unknown, cannot convert to gain")
def __add__(
self,
other,
verbose_history=False,
run_check=True,
check_extra=True,
run_check_acceptability=True,
inplace=False,
):
"""
Combine two UVCal objects along antenna, frequency, time, and/or Jones axis.
Parameters
----------
other : :class: UVCal
Another UVCal object which will be added to self.
verbose_history : bool
Option to allow more verbose history. If True and if the histories for the
two objects are different, the combined object will keep all the history of
both input objects (if many objects are combined in succession this can
lead to very long histories). If False and if the histories for the two
objects are different, the combined object will have the history of the
first object and only the parts of the second object history that are unique
(this is done word by word and can result in hard to interpret histories).
run_check : bool
Option to check for the existence and proper shapes of parameters
after combining objects.
check_extra : bool
Option to check optional parameters as well as required ones.
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
combining objects.
inplace : bool
Option to overwrite self as we go, otherwise create a third object
as the sum of the two.
"""
if inplace:
this = self
else:
this = self.copy()
# Check that both objects are UVCal and valid
this.check(
check_extra=check_extra, run_check_acceptability=run_check_acceptability
)
if not issubclass(other.__class__, this.__class__):
if not issubclass(this.__class__, other.__class__):
raise ValueError(
"Only UVCal (or subclass) objects can be added to "
"a UVCal (or subclass) object"
)
other.check(
check_extra=check_extra, run_check_acceptability=run_check_acceptability
)
# Check objects are compatible
compatibility_params = [
"_cal_type",
"_integration_time",
"_channel_width",
"_telescope_name",
"_gain_convention",
"_x_orientation",
"_cal_style",
"_ref_antenna_name",
]
if this.cal_type == "delay":
compatibility_params.append("_freq_range")
warning_params = [
"_observer",
"_git_hash_cal",
"_sky_field",
"_sky_catalog",
"_Nsources",
"_baseline_range",
"_diffuse_model",
]
for a in compatibility_params:
if getattr(this, a) != getattr(other, a):
msg = (
"UVParameter " + a[1:] + " does not match. Cannot combine objects."
)
raise ValueError(msg)
for a in warning_params:
if getattr(this, a) != getattr(other, a):
msg = "UVParameter " + a[1:] + " does not match. Combining anyway."
warnings.warn(msg)
# Build up history string
history_update_string = " Combined data along "
n_axes = 0
# Check we don't have overlapping data
both_jones = np.intersect1d(this.jones_array, other.jones_array)
both_times = np.intersect1d(this.time_array, other.time_array)
if this.cal_type != "delay":
both_freq = np.intersect1d(this.freq_array[0, :], other.freq_array[0, :])
else:
# Make a non-empty array so we raise an error if other data is duplicated
both_freq = [0]
both_ants = np.intersect1d(this.ant_array, other.ant_array)
if len(both_jones) > 0:
if len(both_times) > 0:
if len(both_freq) > 0:
if len(both_ants) > 0:
raise ValueError(
"These objects have overlapping data and"
" cannot be combined."
)
# Update filename parameter
this.filename = uvutils._combine_filenames(this.filename, other.filename)
if this.filename is not None:
this._filename.form = (len(this.filename),)
temp = np.nonzero(~np.in1d(other.ant_array, this.ant_array))[0]
if len(temp) > 0:
anew_inds = temp
history_update_string += "antenna"
n_axes += 1
else:
anew_inds = []
temp = np.nonzero(~np.in1d(other.time_array, this.time_array))[0]
if len(temp) > 0:
tnew_inds = temp
if n_axes > 0:
history_update_string += ", time"
else:
history_update_string += "time"
n_axes += 1
else:
tnew_inds = []
# adding along frequency axis is not supported for delay-type cal files
if this.cal_type == "gain":
temp = np.nonzero(~np.in1d(other.freq_array[0, :], this.freq_array[0, :]))[
0
]
if len(temp) > 0:
fnew_inds = temp
if n_axes > 0:
history_update_string += ", frequency"
else:
history_update_string += "frequency"
n_axes += 1
else:
fnew_inds = []
else:
fnew_inds = []
temp = np.nonzero(~np.in1d(other.jones_array, this.jones_array))[0]
if len(temp) > 0:
jnew_inds = temp
if n_axes > 0:
history_update_string += ", jones"
else:
history_update_string += "jones"
n_axes += 1
else:
jnew_inds = []
# Initialize tqa variables
can_combine_tqa = True
if this.cal_type == "delay":
Nf_tqa = 1
else:
Nf_tqa = this.Nfreqs
# Pad out self to accommodate new data
if len(anew_inds) > 0:
this.ant_array = np.concatenate(
[this.ant_array, other.ant_array[anew_inds]]
)
order = np.argsort(this.ant_array)
this.ant_array = this.ant_array[order]
if not self.metadata_only:
zero_pad_data = np.zeros(
(
len(anew_inds),
this.Nspws,
this.quality_array.shape[2],
this.Ntimes,
this.Njones,
)
)
zero_pad_flags = np.zeros(
(len(anew_inds), this.Nspws, this.Nfreqs, this.Ntimes, this.Njones)
)
if this.cal_type == "delay":
this.delay_array = np.concatenate(
[this.delay_array, zero_pad_data], axis=0
)[order, :, :, :, :]
else:
this.gain_array = np.concatenate(
[this.gain_array, zero_pad_data], axis=0
)[order, :, :, :, :]
this.flag_array = np.concatenate(
[this.flag_array, 1 - zero_pad_flags], axis=0
).astype(np.bool_)[order, :, :, :, :]
this.quality_array = np.concatenate(
[this.quality_array, zero_pad_data], axis=0
)[order, :, :, :, :]
# If total_quality_array exists, we set it to None and warn the user
if (
this.total_quality_array is not None
or other.total_quality_array is not None
):
warnings.warn(
"Total quality array detected in at least one file; the "
"array in the new object will be set to 'None' because "
"whole-array values cannot be combined when adding antennas"
)
this.total_quality_array = None
can_combine_tqa = False
if this.input_flag_array is not None:
zero_pad = np.zeros(
(
len(anew_inds),
this.Nspws,
this.Nfreqs,
this.Ntimes,
this.Njones,
)
)
this.input_flag_array = np.concatenate(
[this.input_flag_array, 1 - zero_pad], axis=0
).astype(np.bool_)[order, :, :, :, :]
elif other.input_flag_array is not None:
zero_pad = np.zeros(
(
len(anew_inds),
this.Nspws,
this.Nfreqs,
this.Ntimes,
this.Njones,
)
)
this.input_flag_array = np.array(
1
- np.zeros(
(
this.Nants_data,
this.Nspws,
this.Nfreqs,
this.Ntimes,
this.Njones,
)
)
).astype(np.bool_)
this.input_flag_array = np.concatenate(
[this.input_flag_array, 1 - zero_pad], axis=0
).astype(np.bool_)[order, :, :, :, :]
if len(fnew_inds) > 0:
# Exploit the fact that quality array has the same dimensions as the
# main data.
# Also do not need to worry about different cases for gain v. delay type
zero_pad = np.zeros(
(
this.quality_array.shape[0],
this.Nspws,
len(fnew_inds),
this.Ntimes,
this.Njones,
)
)
this.freq_array = np.concatenate(
[this.freq_array, other.freq_array[:, fnew_inds]], axis=1
)
order = np.argsort(this.freq_array[0, :])
this.freq_array = this.freq_array[:, order]
if not self.metadata_only:
this.gain_array = np.concatenate([this.gain_array, zero_pad], axis=2)[
:, :, order, :, :
]
this.flag_array = np.concatenate(
[this.flag_array, 1 - zero_pad], axis=2
).astype(np.bool_)[:, :, order, :, :]
this.quality_array = np.concatenate(
[this.quality_array, zero_pad], axis=2
)[:, :, order, :, :]
if this.total_quality_array is not None and can_combine_tqa:
zero_pad = np.zeros(
(this.Nspws, len(fnew_inds), this.Ntimes, this.Njones)
)
this.total_quality_array = np.concatenate(
[this.total_quality_array, zero_pad], axis=1
)[:, order, :, :]
elif other.total_quality_array is not None and can_combine_tqa:
zero_pad = np.zeros(
(this.Nspws, len(fnew_inds), this.Ntimes, this.Njones)
)
this.total_quality_array = np.zeros(
(this.Nspws, Nf_tqa, this.Ntimes, this.Njones)
)
this.total_quality_array = np.concatenate(
[this.total_quality_array, zero_pad], axis=1
)[:, order, :, :]
if this.input_flag_array is not None:
zero_pad = np.zeros(
(
this.input_flag_array.shape[0],
this.Nspws,
len(fnew_inds),
this.Ntimes,
this.Njones,
)
)
this.input_flag_array = np.concatenate(
[this.input_flag_array, 1 - zero_pad], axis=2
).astype(np.bool_)[:, :, order, :, :]
elif other.input_flag_array is not None:
zero_pad = np.zeros(
(
this.flag_array.shape[0],
this.Nspws,
len(fnew_inds),
this.Ntimes,
this.Njones,
)
)
this.input_flag_array = np.array(
1
- np.zeros(
(
this.flag_array.shape[0],
this.Nspws,
this.flag_array.shape[2],
this.flag_array.shape[3],
this.Njones,
)
)
).astype(np.bool_)
this.input_flag_array = np.concatenate(
[this.input_flag_array, 1 - zero_pad], axis=2
).astype(np.bool_)[:, :, order, :, :]
if len(tnew_inds) > 0:
# Exploit the fact that quality array has the same dimensions as
# the main data
this.time_array = np.concatenate(
[this.time_array, other.time_array[tnew_inds]]
)
this.lst_array = np.concatenate(
[this.lst_array, other.lst_array[tnew_inds]]
)
order = np.argsort(this.time_array)
this.time_array = this.time_array[order]
this.lst_array = this.lst_array[order]
if not self.metadata_only:
zero_pad_data = np.zeros(
(
this.quality_array.shape[0],
this.Nspws,
this.quality_array.shape[2],
len(tnew_inds),
this.Njones,
)
)
zero_pad_flags = np.zeros(
(
this.flag_array.shape[0],
this.Nspws,
this.flag_array.shape[2],
len(tnew_inds),
this.Njones,
)
)
if this.cal_type == "delay":
this.delay_array = np.concatenate(
[this.delay_array, zero_pad_data], axis=3
)[:, :, :, order, :]
else:
this.gain_array = np.concatenate(
[this.gain_array, zero_pad_data], axis=3
)[:, :, :, order, :]
this.flag_array = np.concatenate(
[this.flag_array, 1 - zero_pad_flags], axis=3
).astype(np.bool_)[:, :, :, order, :]
this.quality_array = np.concatenate(
[this.quality_array, zero_pad_data], axis=3
)[:, :, :, order, :]
if this.total_quality_array is not None and can_combine_tqa:
zero_pad = np.zeros(
(
this.Nspws,
this.quality_array.shape[2],
len(tnew_inds),
this.Njones,
)
)
this.total_quality_array = np.concatenate(
[this.total_quality_array, zero_pad], axis=2
)[:, :, order, :]
elif other.total_quality_array is not None and can_combine_tqa:
zero_pad = np.zeros(
(
this.Nspws,
this.quality_array.shape[2],
len(tnew_inds),
this.Njones,
)
)
this.total_quality_array = np.zeros(
(this.Nspws, Nf_tqa, this.Ntimes, this.Njones)
)
this.total_quality_array = np.concatenate(
[this.total_quality_array, zero_pad], axis=2
)[:, :, order, :]
if this.input_flag_array is not None:
zero_pad = np.zeros(
(
this.input_flag_array.shape[0],
this.Nspws,
this.input_flag_array.shape[2],
len(tnew_inds),
this.Njones,
)
)
this.input_flag_array = np.concatenate(
[this.input_flag_array, 1 - zero_pad], axis=3
).astype(np.bool_)[:, :, :, order, :]
elif other.input_flag_array is not None:
zero_pad = np.zeros(
(
this.flag_array.shape[0],
this.Nspws,
this.flag_array.shape[2],
len(tnew_inds),
this.Njones,
)
)
this.input_flag_array = np.array(
1
- np.zeros(
(
this.flag_array.shape[0],
this.Nspws,
this.flag_array.shape[2],
this.flag_array.shape[3],
this.Njones,
)
)
).astype(np.bool_)
this.input_flag_array = np.concatenate(
[this.input_flag_array, 1 - zero_pad], axis=3
).astype(np.bool_)[:, :, :, order, :]
if len(jnew_inds) > 0:
# Exploit the fact that quality array has the same dimensions as
# the main data
this.jones_array = np.concatenate(
[this.jones_array, other.jones_array[jnew_inds]]
)
order = np.argsort(np.abs(this.jones_array))
this.jones_array = this.jones_array[order]
if not self.metadata_only:
zero_pad_data = np.zeros(
(
this.quality_array.shape[0],
this.Nspws,
this.quality_array.shape[2],
this.quality_array.shape[3],
len(jnew_inds),
)
)
zero_pad_flags = np.zeros(
(
this.flag_array.shape[0],
this.Nspws,
this.flag_array.shape[2],
this.flag_array.shape[3],
len(jnew_inds),
)
)
if this.cal_type == "delay":
this.delay_array = np.concatenate(
[this.delay_array, zero_pad_data], axis=4
)[:, :, :, :, order]
else:
this.gain_array = np.concatenate(
[this.gain_array, zero_pad_data], axis=4
)[:, :, :, :, order]
this.flag_array = np.concatenate(
[this.flag_array, 1 - zero_pad_flags], axis=4
).astype(np.bool_)[:, :, :, :, order]
this.quality_array = np.concatenate(
[this.quality_array, zero_pad_data], axis=4
)[:, :, :, :, order]
if this.total_quality_array is not None and can_combine_tqa:
zero_pad = np.zeros(
(
this.Nspws,
this.quality_array.shape[2],
this.quality_array.shape[3],
len(jnew_inds),
)
)
this.total_quality_array = np.concatenate(
[this.total_quality_array, zero_pad], axis=3
)[:, :, :, order]
elif other.total_quality_array is not None and can_combine_tqa:
zero_pad = np.zeros(
(
this.Nspws,
this.quality_array.shape[2],
this.quality_array.shape[3],
len(jnew_inds),
)
)
this.total_quality_array = np.zeros(
(this.Nspws, Nf_tqa, this.Ntimes, this.Njones)
)
this.total_quality_array = np.concatenate(
[this.total_quality_array, zero_pad], axis=3
)[:, :, :, order]
if this.input_flag_array is not None:
zero_pad = np.zeros(
(
this.input_flag_array.shape[0],
this.Nspws,
this.input_flag_array.shape[2],
this.input_flag_array.shape[3],
len(jnew_inds),
)
)
this.input_flag_array = np.concatenate(
[this.input_flag_array, 1 - zero_pad], axis=4
).astype(np.bool_)[:, :, :, :, order]
elif other.input_flag_array is not None:
zero_pad = np.zeros(
(
this.flag_array.shape[0],
this.Nspws,
this.flag_array.shape[2],
this.flag_array.shape[3],
len(jnew_inds),
)
)
this.input_flag_array = np.array(
1
- np.zeros(
(
this.flag_array.shape[0],
this.Nspws,
this.flag_array.shape[2],
this.flag_array.shape[3],
this.Njones,
)
)
).astype(np.bool_)
this.input_flag_array = np.concatenate(
[this.input_flag_array, 1 - zero_pad], axis=4
).astype(np.bool_)[:, :, :, :, order]
# Now populate the data
if not self.metadata_only:
jones_t2o = np.nonzero(np.in1d(this.jones_array, other.jones_array))[0]
times_t2o = np.nonzero(np.in1d(this.time_array, other.time_array))[0]
freqs_t2o = np.nonzero(
np.in1d(this.freq_array[0, :], other.freq_array[0, :])
)[0]
ants_t2o = np.nonzero(np.in1d(this.ant_array, other.ant_array))[0]
if this.cal_type == "delay":
this.delay_array[
np.ix_(ants_t2o, [0], [0], times_t2o, jones_t2o)
] = other.delay_array
this.quality_array[
np.ix_(ants_t2o, [0], [0], times_t2o, jones_t2o)
] = other.quality_array
else:
this.gain_array[
np.ix_(ants_t2o, [0], freqs_t2o, times_t2o, jones_t2o)
] = other.gain_array
this.quality_array[
np.ix_(ants_t2o, [0], freqs_t2o, times_t2o, jones_t2o)
] = other.quality_array
this.flag_array[
np.ix_(ants_t2o, [0], freqs_t2o, times_t2o, jones_t2o)
] = other.flag_array
if this.total_quality_array is not None:
if other.total_quality_array is not None:
if this.cal_type == "delay":
this.total_quality_array[
np.ix_([0], [0], times_t2o, jones_t2o)
] = other.total_quality_array
else:
this.total_quality_array[
np.ix_([0], freqs_t2o, times_t2o, jones_t2o)
] = other.total_quality_array
if this.input_flag_array is not None:
if other.input_flag_array is not None:
this.input_flag_array[
np.ix_(ants_t2o, [0], freqs_t2o, times_t2o, jones_t2o)
] = other.input_flag_array
        # Update N parameters (e.g. Njones)
this.Njones = this.jones_array.shape[0]
this.Ntimes = this.time_array.shape[0]
if this.cal_type == "gain":
this.Nfreqs = this.freq_array.shape[1]
this.Nants_data = len(
np.unique(this.ant_array.tolist() + other.ant_array.tolist())
)
# Check specific requirements
if this.cal_type == "gain" and this.Nfreqs > 1:
freq_separation = np.diff(this.freq_array[0, :])
if not np.isclose(
np.min(freq_separation),
np.max(freq_separation),
rtol=this._freq_array.tols[0],
atol=this._freq_array.tols[1],
):
warnings.warn(
"Combined frequencies are not evenly spaced. This will "
"make it impossible to write this data out to some file types."
)
elif np.max(freq_separation) > this.channel_width:
warnings.warn(
"Combined frequencies are not contiguous. This will make "
"it impossible to write this data out to some file types."
)
if this.Njones > 2:
jones_separation = np.diff(this.jones_array)
if np.min(jones_separation) < np.max(jones_separation):
warnings.warn(
"Combined Jones elements are not evenly spaced. This will "
"make it impossible to write this data out to some file types."
)
if n_axes > 0:
history_update_string += " axis using pyuvdata."
histories_match = uvutils._check_histories(this.history, other.history)
this.history += history_update_string
if not histories_match:
if verbose_history:
this.history += " Next object history follows. " + other.history
else:
extra_history = uvutils._combine_history_addition(
this.history, other.history
)
if extra_history is not None:
this.history += (
" Unique part of next object history follows. "
+ extra_history
)
# Check final object is self-consistent
if run_check:
this.check(
check_extra=check_extra, run_check_acceptability=run_check_acceptability
)
if not inplace:
return this
def __iadd__(
self, other, run_check=True, check_extra=True, run_check_acceptability=True,
):
"""
        Combine two UVCal objects in place along the antenna, frequency, time,
        and/or Jones axis.
Parameters
----------
other : :class: UVCal
Another UVCal object which will be added to self.
run_check : bool
Option to check for the existence and proper shapes of parameters
after combining objects.
check_extra : bool
Option to check optional parameters as well as required ones.
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
combining objects.
"""
self.__add__(
other,
inplace=True,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
)
return self
def select(
self,
antenna_nums=None,
antenna_names=None,
frequencies=None,
freq_chans=None,
times=None,
jones=None,
run_check=True,
check_extra=True,
run_check_acceptability=True,
inplace=True,
):
"""
Downselect data to keep on the object along various axes.
Axes that can be selected along include antennas, frequencies, times and
antenna polarization (jones).
The history attribute on the object will be updated to identify the
operations performed.
Parameters
----------
antenna_nums : array_like of int, optional
            The antenna numbers to keep in the object (antenna positions and
names for the removed antennas will be retained).
This cannot be provided if `antenna_names` is also provided.
antenna_names : array_like of str, optional
The antennas names to keep in the object (antenna positions and
names for the removed antennas will be retained).
This cannot be provided if `antenna_nums` is also provided.
frequencies : array_like of float, optional
The frequencies to keep in the object, each value passed here should
exist in the freq_array.
freq_chans : array_like of int, optional
The frequency channel numbers to keep in the object.
times : array_like of float, optional
The times to keep in the object, each value passed here should
exist in the time_array.
jones : array_like of int or str, optional
            The antenna polarization numbers to keep in the object, each value
passed here should exist in the jones_array. If passing strings, the
canonical polarization strings (e.g. "Jxx", "Jrr") are supported and if the
`x_orientation` attribute is set, the physical dipole strings
(e.g. "Jnn", "Jee") are also supported.
run_check : bool
Option to check for the existence and proper shapes of parameters
after downselecting data on this object (the default is True,
meaning the check will be run).
check_extra : bool
Option to check optional parameters as well as required ones (the
default is True, meaning the optional parameters will be checked).
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
downselecting data on this object (the default is True, meaning the
acceptable range check will be done).
inplace : bool
Option to perform the select directly on self or return a new UVCal
object with just the selected data (the default is True, meaning the
select will be done on self).
"""
if inplace:
cal_object = self
else:
cal_object = self.copy()
# build up history string as we go
history_update_string = " Downselected to specific "
n_selects = 0
if antenna_names is not None:
if antenna_nums is not None:
raise ValueError(
"Only one of antenna_nums and antenna_names can be provided."
)
antenna_names = uvutils._get_iterable(antenna_names)
antenna_nums = []
for s in antenna_names:
if s not in cal_object.antenna_names:
raise ValueError(
f"Antenna name {s} is not present in the antenna_names array"
)
ind = np.where(np.array(cal_object.antenna_names) == s)[0][0]
antenna_nums.append(cal_object.antenna_numbers[ind])
if antenna_nums is not None:
antenna_nums = uvutils._get_iterable(antenna_nums)
history_update_string += "antennas"
n_selects += 1
ant_inds = np.zeros(0, dtype=np.int64)
for ant in antenna_nums:
if ant in cal_object.ant_array:
ant_inds = np.append(
ant_inds, np.where(cal_object.ant_array == ant)[0]
)
else:
raise ValueError(
f"Antenna number {ant} is not present in the array"
)
ant_inds = sorted(set(ant_inds))
cal_object.Nants_data = len(ant_inds)
cal_object.ant_array = cal_object.ant_array[ant_inds]
if not self.metadata_only:
cal_object.flag_array = cal_object.flag_array[ant_inds, :, :, :, :]
cal_object.quality_array = cal_object.quality_array[
ant_inds, :, :, :, :
]
if cal_object.cal_type == "delay":
cal_object.delay_array = cal_object.delay_array[
ant_inds, :, :, :, :
]
else:
cal_object.gain_array = cal_object.gain_array[ant_inds, :, :, :, :]
if cal_object.input_flag_array is not None:
cal_object.input_flag_array = cal_object.input_flag_array[
ant_inds, :, :, :, :
]
if cal_object.total_quality_array is not None:
warnings.warn(
"Cannot preserve total_quality_array when changing "
"number of antennas; discarding"
)
cal_object.total_quality_array = None
if times is not None:
times = uvutils._get_iterable(times)
if n_selects > 0:
history_update_string += ", times"
else:
history_update_string += "times"
n_selects += 1
time_inds = np.zeros(0, dtype=np.int64)
for jd in times:
if jd in cal_object.time_array:
time_inds = np.append(
time_inds, np.where(cal_object.time_array == jd)[0]
)
else:
raise ValueError(
"Time {t} is not present in the time_array".format(t=jd)
)
time_inds = sorted(set(time_inds))
cal_object.Ntimes = len(time_inds)
cal_object.time_array = cal_object.time_array[time_inds]
if cal_object.lst_array is not None:
cal_object.lst_array = cal_object.lst_array[time_inds]
if cal_object.Ntimes > 1:
time_separation = np.diff(cal_object.time_array)
if not np.isclose(
np.min(time_separation),
np.max(time_separation),
rtol=cal_object._time_array.tols[0],
atol=cal_object._time_array.tols[1],
):
warnings.warn(
"Selected times are not evenly spaced. This "
"is not supported by the calfits format."
)
if not self.metadata_only:
cal_object.flag_array = cal_object.flag_array[:, :, :, time_inds, :]
cal_object.quality_array = cal_object.quality_array[
:, :, :, time_inds, :
]
if cal_object.cal_type == "delay":
cal_object.delay_array = cal_object.delay_array[
:, :, :, time_inds, :
]
else:
cal_object.gain_array = cal_object.gain_array[:, :, :, time_inds, :]
if cal_object.input_flag_array is not None:
cal_object.input_flag_array = cal_object.input_flag_array[
:, :, :, time_inds, :
]
if cal_object.total_quality_array is not None:
cal_object.total_quality_array = cal_object.total_quality_array[
:, :, time_inds, :
]
if freq_chans is not None:
freq_chans = uvutils._get_iterable(freq_chans)
if frequencies is None:
frequencies = cal_object.freq_array[0, freq_chans]
else:
frequencies = uvutils._get_iterable(frequencies)
frequencies = np.sort(
list(set(frequencies) | set(cal_object.freq_array[0, freq_chans]))
)
if frequencies is not None:
frequencies = uvutils._get_iterable(frequencies)
if n_selects > 0:
history_update_string += ", frequencies"
else:
history_update_string += "frequencies"
n_selects += 1
freq_inds = np.zeros(0, dtype=np.int64)
# this works because we only allow one SPW. This will have to be
# reworked when we support more.
freq_arr_use = cal_object.freq_array[0, :]
for f in frequencies:
if f in freq_arr_use:
freq_inds = np.append(freq_inds, np.where(freq_arr_use == f)[0])
else:
raise ValueError(
"Frequency {f} is not present in the freq_array".format(f=f)
)
freq_inds = sorted(set(freq_inds))
cal_object.Nfreqs = len(freq_inds)
cal_object.freq_array = cal_object.freq_array[:, freq_inds]
if cal_object.Nfreqs > 1:
freq_separation = (
cal_object.freq_array[0, 1:] - cal_object.freq_array[0, :-1]
)
if not np.isclose(
np.min(freq_separation),
np.max(freq_separation),
rtol=cal_object._freq_array.tols[0],
atol=cal_object._freq_array.tols[1],
):
warnings.warn(
"Selected frequencies are not evenly spaced. This "
"is not supported by the calfits format"
)
if not self.metadata_only:
cal_object.flag_array = cal_object.flag_array[:, :, freq_inds, :, :]
if cal_object.cal_type == "delay":
pass
else:
cal_object.quality_array = cal_object.quality_array[
:, :, freq_inds, :, :
]
cal_object.gain_array = cal_object.gain_array[:, :, freq_inds, :, :]
if cal_object.input_flag_array is not None:
cal_object.input_flag_array = cal_object.input_flag_array[
:, :, freq_inds, :, :
]
if cal_object.cal_type == "delay":
pass
else:
if cal_object.total_quality_array is not None:
cal_object.total_quality_array = cal_object.total_quality_array[
:, freq_inds, :, :
]
if jones is not None:
jones = uvutils._get_iterable(jones)
if np.array(jones).ndim > 1:
jones = np.array(jones).flatten()
if n_selects > 0:
history_update_string += ", jones polarization terms"
else:
history_update_string += "jones polarization terms"
n_selects += 1
jones_inds = np.zeros(0, dtype=np.int64)
for j in jones:
if isinstance(j, str):
j_num = uvutils.jstr2num(j, x_orientation=self.x_orientation)
else:
j_num = j
if j_num in cal_object.jones_array:
jones_inds = np.append(
jones_inds, np.where(cal_object.jones_array == j_num)[0]
)
else:
raise ValueError(
"Jones term {j} is not present in the jones_array".format(j=j)
)
jones_inds = sorted(set(jones_inds))
cal_object.Njones = len(jones_inds)
cal_object.jones_array = cal_object.jones_array[jones_inds]
if len(jones_inds) > 2:
jones_separation = (
cal_object.jones_array[1:] - cal_object.jones_array[:-1]
)
if np.min(jones_separation) < np.max(jones_separation):
warnings.warn(
"Selected jones polarization terms are not evenly spaced. This "
"is not supported by the calfits format"
)
if not self.metadata_only:
cal_object.flag_array = cal_object.flag_array[:, :, :, :, jones_inds]
cal_object.quality_array = cal_object.quality_array[
:, :, :, :, jones_inds
]
if cal_object.cal_type == "delay":
cal_object.delay_array = cal_object.delay_array[
:, :, :, :, jones_inds
]
else:
cal_object.gain_array = cal_object.gain_array[
:, :, :, :, jones_inds
]
if cal_object.input_flag_array is not None:
cal_object.input_flag_array = cal_object.input_flag_array[
:, :, :, :, jones_inds
]
if cal_object.total_quality_array is not None:
cal_object.total_quality_array = cal_object.total_quality_array[
:, :, :, jones_inds
]
history_update_string += " using pyuvdata."
cal_object.history = cal_object.history + history_update_string
# check if object is self-consistent
if run_check:
cal_object.check(
check_extra=check_extra, run_check_acceptability=run_check_acceptability
)
if not inplace:
return cal_object
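    # Illustrative note (not library code): a typical downselect on a UVCal
    # instance ``cal`` (antenna numbers and Jones strings are placeholders):
    #   sub = cal.select(antenna_nums=[0, 1], jones=["Jxx"], inplace=False)
    # ``sub`` keeps only the requested antennas and Jones terms, and its history
    # string records the selection.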
def _convert_from_filetype(self, other):
for p in other:
param = getattr(other, p)
setattr(self, p, param)
def _convert_to_filetype(self, filetype):
if filetype == "calfits":
from . import calfits
other_obj = calfits.CALFITS()
else:
raise ValueError("filetype must be calfits.")
for p in self:
param = getattr(self, p)
setattr(other_obj, p, param)
return other_obj
def read_calfits(
self,
filename,
read_data=True,
run_check=True,
check_extra=True,
run_check_acceptability=True,
):
"""
Read in data from calfits file(s).
Parameters
----------
filename : str or list of str
The calfits file(s) to read from.
read_data : bool
Read in the gains or delays, quality arrays and flag arrays.
If set to False, only the metadata will be read in. Setting read_data to
False results in a metadata only object.
run_check : bool
Option to check for the existence and proper shapes of
parameters after reading in the file.
check_extra : bool
Option to check optional parameters as well as required ones.
run_check_acceptability : bool
Option to check acceptable range of the values of
parameters after reading in the file.
"""
from . import calfits
if isinstance(filename, (list, tuple)):
self.read_calfits(
filename[0],
read_data=read_data,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
)
if len(filename) > 1:
for f in filename[1:]:
uvcal2 = UVCal()
uvcal2.read_calfits(
f,
read_data=read_data,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
)
self += uvcal2
del uvcal2
else:
calfits_obj = calfits.CALFITS()
calfits_obj.read_calfits(
filename,
read_data=read_data,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
)
self._convert_from_filetype(calfits_obj)
del calfits_obj
def read_fhd_cal(
self,
cal_file,
obs_file,
layout_file=None,
settings_file=None,
raw=True,
read_data=True,
extra_history=None,
run_check=True,
check_extra=True,
run_check_acceptability=True,
):
"""
Read data from an FHD cal.sav file.
Parameters
----------
cal_file : str or list of str
The cal.sav file or list of files to read from.
obs_file : str or list of str
The obs.sav file or list of files to read from.
layout_file : str
The FHD layout file. Required for antenna_positions to be set.
settings_file : str or list of str, optional
The settings_file or list of files to read from. Optional,
but very useful for provenance.
raw : bool
Option to use the raw (per antenna, per frequency) solution or
to use the fitted (polynomial over phase/amplitude) solution.
Default is True (meaning use the raw solutions).
read_data : bool
Read in the gains, quality array and flag data. If set to False, only
the metadata will be read in. Setting read_data to False results in
a metadata only object. Note that if read_data is False, metadata is
derived entirely from the obs_file, which may result in slightly different
values than if it is derived from the cal file.
extra_history : str or list of str, optional
String(s) to add to the object's history parameter.
run_check : bool
Option to check for the existence and proper shapes of
parameters after reading in the file.
check_extra : bool
Option to check optional parameters as well as required ones.
run_check_acceptability : bool
Option to check acceptable range of the values of
parameters after reading in the file.
"""
from . import fhd_cal
if isinstance(cal_file, (list, tuple)):
if isinstance(obs_file, (list, tuple)):
if len(obs_file) != len(cal_file):
raise ValueError(
"Number of obs_files must match number of cal_files"
)
else:
raise ValueError("Number of obs_files must match number of cal_files")
if layout_file is not None:
if isinstance(layout_file, (list, tuple)):
if len(layout_file) != len(cal_file):
raise ValueError(
"Number of layout_files must match number of cal_files"
)
else:
raise ValueError(
"Number of layout_files must match number of cal_files"
)
layout_file_use = layout_file[0]
else:
layout_file_use = None
if settings_file is not None:
if isinstance(settings_file, (list, tuple)):
if len(settings_file) != len(cal_file):
raise ValueError(
"Number of settings_files must match number of cal_files"
)
else:
raise ValueError(
"Number of settings_files must match number of cal_files"
)
settings_file_use = settings_file[0]
else:
settings_file_use = None
self.read_fhd_cal(
cal_file[0],
obs_file[0],
layout_file=layout_file_use,
settings_file=settings_file_use,
raw=raw,
read_data=read_data,
extra_history=extra_history,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
)
if len(cal_file) > 1:
for ind, f in enumerate(cal_file[1:]):
uvcal2 = UVCal()
if settings_file is not None:
settings_file_use = settings_file[ind + 1]
if layout_file is not None:
layout_file_use = layout_file[ind + 1]
uvcal2.read_fhd_cal(
f,
obs_file[ind + 1],
layout_file=layout_file_use,
settings_file=settings_file_use,
raw=raw,
read_data=read_data,
extra_history=extra_history,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
)
self += uvcal2
del uvcal2
else:
if isinstance(obs_file, (list, tuple)):
raise ValueError("Number of obs_files must match number of cal_files")
if layout_file is not None:
if isinstance(layout_file, (list, tuple)) and len(layout_file) > 1:
raise ValueError(
"Number of layout_files must match number of cal_files"
)
if settings_file is not None:
if isinstance(settings_file, (list, tuple)) and len(settings_file) > 1:
raise ValueError(
"Number of settings_files must match number of cal_files"
)
fhd_cal_obj = fhd_cal.FHDCal()
fhd_cal_obj.read_fhd_cal(
cal_file,
obs_file,
layout_file=layout_file,
settings_file=settings_file,
raw=raw,
read_data=read_data,
extra_history=extra_history,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
)
self._convert_from_filetype(fhd_cal_obj)
del fhd_cal_obj
def write_calfits(
self,
filename,
run_check=True,
check_extra=True,
run_check_acceptability=True,
clobber=False,
):
"""
Write the data to a calfits file.
Parameters
----------
filename : str
The calfits file to write to.
run_check : bool
Option to check for the existence and proper shapes of
parameters before writing the file.
check_extra : bool
Option to check optional parameters as well as required ones.
run_check_acceptability : bool
Option to check acceptable range of the values of
parameters before writing the file.
clobber : bool
Option to overwrite the filename if the file already exists.
Raises
------
ValueError
If the UVCal object is a metadata only object.
"""
if self.metadata_only:
raise ValueError(
"Cannot write out metadata only objects to a calfits file."
)
calfits_obj = self._convert_to_filetype("calfits")
calfits_obj.write_calfits(
filename,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
clobber=clobber,
)
del calfits_obj
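
# Illustrative usage sketch (not part of the library). The file names and the
# antenna/Jones selections below are hypothetical placeholders; this simply
# chains the public methods defined above: read a calfits file, downselect,
# convert delay solutions to gains if needed, and write the result back out.
if __name__ == "__main__":
    cal = UVCal()
    cal.read_calfits("example.calfits")  # assumed to exist on disk
    cal.select(antenna_nums=[0, 1], jones=["Jxx"])
    if cal.cal_type == "delay":
        # expand per-antenna delays into per-frequency gains
        cal.convert_to_gain(delay_convention="minus")
    cal.write_calfits("example_subset.calfits", clobber=True)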
asyn.py
import asyncio
import functools
import inspect
import re
import os
import sys
import threading
from .utils import other_paths, is_exception
from .spec import AbstractFileSystem
# this global variable holds whether this thread is running async or not
thread_state = threading.local()
private = re.compile("_[^_]")
def _run_until_done(coro):
"""execute coroutine, when already in the event loop"""
if sys.version_info < (3, 7): # pragma: no cover
raise RuntimeError(
"async file systems do not work completely on py<37. "
"The nested call currently underway cannot be processed. "
"Please downgrade your fsspec or upgrade python."
)
loop = asyncio.get_event_loop()
task = asyncio.current_task()
asyncio.tasks._unregister_task(task)
del asyncio.tasks._current_tasks[loop]
runner = loop.create_task(coro)
while not runner.done():
loop._run_once()
asyncio.tasks._current_tasks[loop] = task
return runner.result()
def sync(loop, func, *args, callback_timeout=None, **kwargs):
"""
Run coroutine in loop running in separate thread.
"""
e = threading.Event()
main_tid = threading.get_ident()
result = [None]
error = [False]
async def f():
try:
if main_tid == threading.get_ident():
raise RuntimeError("sync() called from thread of running loop")
await asyncio.sleep(0)
thread_state.asynchronous = True
future = func(*args, **kwargs)
if callback_timeout is not None:
future = asyncio.wait_for(future, callback_timeout)
result[0] = await future
except Exception:
error[0] = sys.exc_info()
finally:
thread_state.asynchronous = False
e.set()
asyncio.run_coroutine_threadsafe(f(), loop=loop)
if callback_timeout is not None:
if not e.wait(callback_timeout):
raise TimeoutError("timed out after %s s." % (callback_timeout,))
else:
while not e.is_set():
e.wait(10)
if error[0]:
typ, exc, tb = error[0]
raise exc.with_traceback(tb)
else:
return result[0]
def maybe_sync(func, self, *args, **kwargs):
"""Make function call into coroutine or maybe run
If we are running async, run coroutine on current loop until done;
otherwise runs it on the loop (if is a coroutine already) or directly. Will guess
we are running async if either "self" has an attribute asynchronous which is True,
or thread_state does (this gets set in ``sync()`` itself, to avoid nesting loops).
"""
loop = self.loop
# second condition below triggers if this is running in the thread of the
# event loop *during* the call to sync(), i.e., while running
# asynchronously
if getattr(self, "asynchronous", False) or getattr(
thread_state, "asynchronous", False
):
if inspect.iscoroutinefunction(func):
# run coroutine while pausing this one (because we are within async)
return _run_until_done(func(*args, **kwargs))
else:
# make awaitable which then calls the blocking function
return _run_as_coroutine(func, *args, **kwargs)
else:
if inspect.iscoroutinefunction(func):
# run the awaitable on the loop
return sync(loop, func, *args, **kwargs)
else:
# just call the blocking function
return func(*args, **kwargs)
async def _run_as_coroutine(func, *args, **kwargs):
# This is not currently used
return func(*args, **kwargs)
def sync_wrapper(func, obj=None):
"""Given a function, make so can be called in async or blocking contexts
Leave obj=None if defining within a class. Pass the instance if attaching
as an attribute of the instance.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
self = obj or args[0]
return maybe_sync(func, self, *args, **kwargs)
return wrapper
def async_wrapper(func):
"""Run a sync function on the event loop"""
@functools.wraps(func)
async def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper
def get_loop():
"""Create a running loop in another thread"""
loop = asyncio.new_event_loop()
t = threading.Thread(target=loop.run_forever)
t.daemon = True
t.start()
return loop
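
# Illustrative sketch (not part of the library): how ``get_loop`` and ``sync`` are
# intended to be combined to call a coroutine from ordinary blocking code. The
# coroutine and the function name below are hypothetical.
def _example_sync_usage():
    async def _double(x):
        # stand-in for a backend's async method
        await asyncio.sleep(0)
        return 2 * x

    # dedicated event loop running forever in a daemon thread
    loop = get_loop()
    # ``sync`` schedules the coroutine on that loop and blocks until it finishes
    return sync(loop, _double, 21)  # -> 42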
# these methods should be implemented as async by any async-able backend
async_methods = [
"_ls",
"_cat_file",
"_get_file",
"_put_file",
"_rm_file",
"_cp_file",
"_pipe_file",
]
# these methods could be overridden, but have default sync versions which rely on _ls
# the sync methods below all call expand_path, which in turn may call walk or glob
# (if passed paths with glob characters, or for recursive=True, respectively)
default_async_methods = [
"_expand_path",
"_info",
"_isfile",
"_isdir",
"_exists",
"_walk",
"_glob",
"_find",
"_du",
]
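# Illustrative note (not library code): ``mirror_sync_methods`` (defined at the end
# of this module) consults these lists so that an implementation providing an async
# method such as ``_ls`` can also expose a blocking counterpart (``ls``), and vice
# versa when only the sync version exists.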
class AsyncFileSystem(AbstractFileSystem):
"""Async file operations, default implementations
Passes bulk operations to asyncio.gather for concurrent operation.
Implementations that have concurrent batch operations and/or async methods
should inherit from this class instead of AbstractFileSystem. Docstrings are
copied from the un-underscored method in AbstractFileSystem, if not given.
"""
# note that methods do not have docstring here; they will be copied
# for _* methods and inferred for overridden methods.
async_impl = True
def __init__(self, *args, asynchronous=False, loop=None, **kwargs):
self.asynchronous = asynchronous
self.loop = loop or get_loop()
super().__init__(*args, **kwargs)
async def _rm(self, path, recursive=False, **kwargs):
await asyncio.gather(*[self._rm_file(p, **kwargs) for p in path])
def rm(self, path, recursive=False, **kwargs):
path = self.expand_path(path, recursive=recursive)
maybe_sync(self._rm, self, path, **kwargs)
async def _copy(self, paths, path2, **kwargs):
return await asyncio.gather(
*[self._cp_file(p1, p2, **kwargs) for p1, p2 in zip(paths, path2)],
return_exceptions=True
)
def copy(
self, path1, path2, recursive=False, on_error=None, maxdepth=None, **kwargs
):
if on_error is None and recursive:
on_error = "ignore"
elif on_error is None:
on_error = "raise"
paths = self.expand_path(path1, maxdepth=maxdepth, recursive=recursive)
path2 = other_paths(paths, path2)
result = maybe_sync(self._copy, self, paths, path2, **kwargs)
for ex in filter(is_exception, result):
if on_error == "ignore" and isinstance(ex, FileNotFoundError):
continue
raise ex
async def _pipe(self, path, value=None, **kwargs):
if isinstance(path, str):
path = {path: value}
await asyncio.gather(
*[self._pipe_file(k, v, **kwargs) for k, v in path.items()]
)
async def _cat(self, paths, **kwargs):
return await asyncio.gather(
*[
asyncio.ensure_future(self._cat_file(path, **kwargs), loop=self.loop)
for path in paths
],
return_exceptions=True
)
def cat(self, path, recursive=False, on_error="raise", **kwargs):
paths = self.expand_path(path, recursive=recursive)
out = maybe_sync(self._cat, self, paths, **kwargs)
if on_error == "raise":
ex = next(filter(is_exception, out), False)
if ex:
raise ex
if (
len(paths) > 1
or isinstance(path, list)
or paths[0] != self._strip_protocol(path)
):
return {
k: v
for k, v in zip(paths, out)
if on_error != "omit" or not is_exception(v)
}
else:
return out[0]
async def _put(self, lpaths, rpaths, **kwargs):
return await asyncio.gather(
*[
self._put_file(lpath, rpath, **kwargs)
for lpath, rpath in zip(lpaths, rpaths)
]
)
def put(self, lpath, rpath, recursive=False, **kwargs):
from .implementations.local import make_path_posix, LocalFileSystem
rpath = self._strip_protocol(rpath)
if isinstance(lpath, str):
lpath = make_path_posix(lpath)
fs = LocalFileSystem()
lpaths = fs.expand_path(lpath, recursive=recursive)
rpaths = other_paths(lpaths, rpath)
maybe_sync(self._put, self, lpaths, rpaths, **kwargs)
async def _get(self, rpaths, lpaths, **kwargs):
        dirs = [os.path.dirname(lp) for lp in lpaths]
        for d in dirs:
            os.makedirs(d, exist_ok=True)
return await asyncio.gather(
*[
self._get_file(rpath, lpath, **kwargs)
for lpath, rpath in zip(lpaths, rpaths)
]
)
def get(self, rpath, lpath, recursive=False, **kwargs):
from fsspec.implementations.local import make_path_posix
rpath = self._strip_protocol(rpath)
lpath = make_path_posix(lpath)
rpaths = self.expand_path(rpath, recursive=recursive)
lpaths = other_paths(rpaths, lpath)
        for lp in lpaths:
            os.makedirs(os.path.dirname(lp), exist_ok=True)
return sync(self.loop, self._get, rpaths, lpaths)
def mirror_sync_methods(obj):
"""Populate sync and async methods for obj
For each method will create a sync version if the name refers to an async method
(coroutine) and there is no override in the child class; will create an async
method for the corresponding sync method if there is no implementation.
Uses the methods specified in
- async_methods: the set that an implementation is expected to provide
- default_async_methods: that can be derived from their sync version in
AbstractFileSystem
- AsyncFileSystem: async-specific default coroutines
"""
from fsspec import AbstractFileSystem
for method in async_methods + default_async_methods + dir(AsyncFileSystem):
if not method.startswith("_"):
continue
smethod = method[1:]
if private.match(method):
isco = inspect.iscoroutinefunction(getattr(obj, method, None))
unsync = getattr(getattr(obj, smethod, False), "__func__", None)
is_default = unsync is getattr(AbstractFileSystem, smethod, "")
if isco and is_default:
mth = sync_wrapper(getattr(obj, method), obj=obj)
setattr(obj, smethod, mth)
if not mth.__doc__:
mth.__doc__ = getattr(
getattr(AbstractFileSystem, smethod, None), "__doc__", ""
)
elif (
hasattr(obj, smethod)
and inspect.ismethod(getattr(obj, smethod))
and not hasattr(obj, method)
):
setattr(obj, method, async_wrapper(getattr(obj, smethod)))
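# Illustrative sketch (not part of fsspec): how ``sync_wrapper`` exposes an async
# method as a blocking call. ``ToyAsync`` and ``_double`` are hypothetical names
# used only for this example; the wrapper hands the coroutine to the background
# event loop created by ``get_loop()`` via ``sync()``.
if __name__ == "__main__":
    class ToyAsync:
        asynchronous = False
        def __init__(self):
            self.loop = get_loop()
        async def _double(self, x):
            await asyncio.sleep(0)  # stand-in for real async I/O
            return 2 * x
    toy = ToyAsync()
    # Wrap the bound coroutine method, mirroring what mirror_sync_methods does.
    toy.double = sync_wrapper(toy._double, obj=toy)
    print(toy.double(21))  # expected output: 42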
|
NamecheapDdnsService.py
|
import requests
import threading
import time
from core.Service import Service
from topics.ipchangenotification.IpChangeNotification import IpChangeNotification
from topics.notification.Notification import Notification
from topics.notification.NotificationLevel import NotificationLevel
class NamecheapDdnsService(Service):
def initialize(self):
self.url_pattern = 'https://dynamicdns.park-your-domain.com/update?host={host}&domain={domain}&password={password}'
self.ipChangeThread = None
self.ipChangeThreadSemaphore = threading.Semaphore()
self.core.dataRouter.subscribe(IpChangeNotification, self.updateIp)
def start(self):
pass
def updateIp(self, ip_change_notification):
self.ipChangeThreadSemaphore.acquire()
if self.ipChangeThread is not None:
self.ipChangeThread.stopIpChange()
self.ipChangeThread = IpChangeThread(ip_change_notification, self.config, self.core, self.url_pattern)
threading.Thread(target=self.ipChangeThread.changeIp).start()
self.ipChangeThreadSemaphore.release()
class IpChangeThread:
def __init__(self, ip_change_notification, config, core, url_pattern):
self.interruptThread = False
self.ip_change_notification = ip_change_notification
self.config = config
self.url_pattern = url_pattern
self.core = core
def changeIp(self):
while self.interruptThread is False:
host = self.config['Host']
domain = self.config['Domain']
password = self.config['Password']
repeatPeriodInSec = self.config.getint('RepeatPeriodInSec')
url = self.url_pattern.format(host=host, domain=domain, password=password)
try:
resp = requests.get(url)
if resp.status_code == 200:
self.core.dataRouter.publish(
Notification("Updated IP of " + domain + " to IP " + self.ip_change_notification.new_ip,
NotificationLevel.Info))
self.interruptThread = True
else:
                    self.core.logger.log('Failed to update: ' + resp.text)
self.core.dataRouter.publish(
Notification("Failed to update Namecheap's IP to " + self.ip_change_notification.new_ip,
NotificationLevel.Error))
except Exception as e:
self.core.dataRouter.publish(Notification("Failed to update IP", NotificationLevel.Error))
self.core.logger.logError("Error: NamecheapDdnsService returned the following error: ", e)
time.sleep(repeatPeriodInSec)
def stopIpChange(self):
self.interruptThread = True
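# Illustrative sketch (not part of the service): the single HTTP call the update
# thread performs, shown with placeholder credentials. The URL pattern is the same
# one used by NamecheapDdnsService above; the host/domain/password values below
# are hypothetical and must come from your own configuration.
if __name__ == '__main__':
    url = ('https://dynamicdns.park-your-domain.com/update'
           '?host={host}&domain={domain}&password={password}').format(
        host='<host>', domain='<your-domain>', password='<ddns-password>')
    resp = requests.get(url)
    # A 200 status only means the endpoint answered; inspect the body to see
    # whether the update itself succeeded.
    print(resp.status_code)
    print(resp.text)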
|
wiki_title_to_freebase_mapping.py
|
"""
Map Wikipedia title to Freebase mid. Also obtain its labels and redirect links.
"""
import sys
import pickle
from multiprocessing import Process, Manager
import SPARQLWrapper
from url_conversion_helper import freebase_encode_article
__author__ = "Abhishek, Sanya B. Taneja, and Garima Malik"
__maintainer__ = "Abhishek"
def chunks(sentences, number_of_sentences):
"""
Split a list into N sized chunks.
"""
number_of_sentences = max(1, number_of_sentences)
return [sentences[i:i+number_of_sentences]
for i in range(0, len(sentences), number_of_sentences)]
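# Example (illustrative): chunks([1, 2, 3, 4, 5], 2) -> [[1, 2], [3, 4], [5]]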
def get_freebase_mid(article_title, retry_count=0):
"""
Return mid associated to a Wikipedia article link.
"""
# Wikipedia article link
query = ('''select distinct ?entity {?entity
<http://rdf.freebase.com/key/wikipedia.en_title>
"'''+ article_title + '''"} LIMIT 10''')
sparql.setQuery(query)
sparql.setReturnFormat(SPARQLWrapper.JSON)
try:
results = sparql.query().convert()
except:#pylint:disable=bare-except
if retry_count >= 3:
print("FAIL REQUEST:FBMID", article_title)
return ''
else:
retry_count += 1
return get_freebase_mid(article_title, retry_count=retry_count)
if len(results["results"]["bindings"]) >= 1: #should be a unique mid per page?
result = results["results"]["bindings"][0]
# mid found
mid = result["entity"]["value"]
else:
# mid not found
mid = ''
return mid
def get_freebase_redirects(mid, retry_count=0):
"""
Return list of redirect titles to wiki page mid.
"""
redirect_list = []
query = ('''prefix : <http://rdf.freebase.com/ns/>
select distinct ?entity_label
{ <'''+ mid +'''> <http://rdf.freebase.com/key/wikipedia.en> ?entity_label
} LIMIT 1000''')
sparql.setQuery(query)
sparql.setReturnFormat(SPARQLWrapper.JSON)
try:
results = sparql.query().convert()
except:#pylint:disable=bare-except
if retry_count >= 3:
print("FAIL REQUEST:REDIRECTS", mid)
return []
else:
retry_count += 1
return get_freebase_redirects(mid, retry_count=retry_count)
for result in results["results"]["bindings"]:
alternate_title = result["entity_label"]["value"]
if '$' in alternate_title:
alternate_title = alternate_title.replace(
'$', '\\u').encode('utf-8').decode('unicode-escape')
redirect_list.append(alternate_title)
return redirect_list
def get_freebase_labels(mid, retry_count=0):
"""
Return labels assigned by freebase to a particular mid.
"""
labels = []
query = ('''prefix : <http://rdf.freebase.com/ns/>
select distinct ?entity_label
{ <'''+ mid +'''> a ?entity_label
} LIMIT 200''')
sparql.setQuery(query)
sparql.setReturnFormat(SPARQLWrapper.JSON)
try:
results = sparql.query().convert()
except:#pylint:disable=bare-except
if retry_count >= 3:
print("FAIL REQUEST:LABELS", mid)
return []
else:
retry_count += 1
return get_freebase_labels(mid, retry_count=retry_count)
for result in results["results"]["bindings"]:
# print("label: ",result["entity_label"]["value"])
labels.append(
result["entity_label"]["value"][len('http://rdf.freebase.com/ns'):].replace('.', '/')
)
return labels
def process_list_of_title(article_titles, master_dict, uid):
"""
Obtain the necessary values from Freebase for a list of titles.
"""
count = 0
for article_title in article_titles:
freebase_encoded_title = freebase_encode_article(article_title)
fbmid = get_freebase_mid(freebase_encoded_title)
if fbmid:
fb_labels = get_freebase_labels(fbmid)
redirects = get_freebase_redirects(fbmid)
master_dict[article_title] = ({
'fbmid' : fbmid,
'labels' : fb_labels,
'alternate_titles' : redirects
})
count += 1
if count % 5000 == 0:
print(uid, count)
#pylint:disable=invalid-name
if __name__ == '__main__':
if len(sys.argv) != 3:
print('Usage: python util/wikification/wiki_title_to_freebase_mapping.py',
'../data/raw/enwiki-latest-all-titles-in-ns0',
'../data/processed/wikification/title_to_fbmid.pickle')
sys.exit(1)
sparql = SPARQLWrapper.SPARQLWrapper("http://localhost:8890/sparql/")
all_wiki_titles = []
with open(sys.argv[1], 'r', encoding='utf-8') as file_p:
for title in filter(None, file_p.read().split('\n')):
all_wiki_titles.append(title)
manager = Manager()
title_to_freebase = manager.dict()
parts = chunks(all_wiki_titles, 1300000)
processes = []
for i, part in enumerate(parts):
processes.append(Process(target=process_list_of_title, args=(part, title_to_freebase, i)))
for p in processes:
p.start()
for p in processes:
p.join()
pickle.dump(dict(title_to_freebase), open(sys.argv[2], 'wb'), protocol=4)
|
verbal-memory.py
|
import sys
import time
from cv2 import cv2
import numpy as np
import mss
from itertools import islice
from pynput.mouse import Button, Controller
import pytesseract
import threading
import keyboard
pytesseract.pytesseract.tesseract_cmd = r"C:\Program Files\Tesseract-OCR\tesseract.exe"
flag2 = True
flag4 = False
mouse = Controller()
main_word_list = list()
time.sleep(1)
mouse.position = (950, 450)
mouse.click(Button.left, 1)
def get_number():
global flag2, flag4
while True:
stc = mss.mss()
scr = stc.grab(
{
"left": 790,
"top": 280,
"width": 300,
"height": 80,
}
)
frame = np.array(scr)
        try:
            # Run OCR once per frame instead of repeating the call for every check.
            words = pytesseract.image_to_string(frame).splitlines()
            print(words)
            if words[0] not in main_word_list:  # word not seen before
                main_word_list.append(words[0])
                mouse.position = (250 + 744, 230 + 152)
                mouse.click(Button.left, 1)
                time.sleep(0.3)
            else:  # word already seen
                mouse.position = (180 + 744, 230 + 152)
                mouse.click(Button.left, 1)
                time.sleep(0.3)
        except IndexError:
            pass
if flag2 == False:
break
threading.Thread(target=get_number).start()
while True:
stc = mss.mss()
scr = stc.grab(
{
"left": 744,
"top": 152,
"width": 420,
"height": 480,
}
)
frame = np.array(scr)
print(main_word_list)
try:
cv2.putText(frame, "word " + main_word_list[-1], (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
cv2.putText(frame, "word count " + str(len(main_word_list)), (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255),
2)
except IndexError:
pass
cv2.imshow("main", frame)
cv2.setWindowProperty("main", cv2.WND_PROP_TOPMOST, 1)
if cv2.waitKey(1) & 0xFF == ord("q"):
cv2.destroyAllWindows()
cv2.waitKey(1)
flag2 = False
sys.exit()
|
trained.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import io
import numpy as np
import argparse
import cv2
from cv2 import *
import picamera
import threading
from threading import Thread
import os
from os import listdir
from os.path import isfile, join, isdir
import sys
import math
import time
import imutils
from imutils.video.pivideostream import PiVideoStream
print "Initializing point tracking"
parser = argparse.ArgumentParser(description='Cast some spells! Recognize wand motions')
parser.add_argument('--train', help='Causes wand movement images to be stored for training selection.', action="store_true")
parser.add_argument('--circles', help='Use circles to select wand location', action="store_true")
args = parser.parse_args()
print(args.train)
print(args.circles)
# Parameters
lk_params = dict( winSize = (25,25),
maxLevel = 7,
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
blur_params = (4,4)
dilation_params = (5, 5)
movment_threshold = 80
active = False
# start capturing
vs = PiVideoStream().start()
time.sleep(2.0)
run_request = True
frame_holder = vs.read()
frame = None
print "About to start."
knn = None
nameLookup = {}
def TrainOcr() :
global knn, nameLookup
labelNames = []
labelIndexes = []
trainingSet = []
numPics = 0
dirCount = 0
print "Getting script path."
scriptpath = os.path.realpath(__file__)
print "Script Path: " + scriptpath
mypath = os.path.dirname(scriptpath) + "/Pictures/"
print "Training directory:" + mypath
for d in listdir(mypath):
if isdir(join(mypath, d)):
nameLookup[dirCount] = d
dirCount = dirCount + 1
for f in listdir(join(mypath,d)):
if isfile(join(mypath,d,f)):
labelNames.append(d)
labelIndexes.append(dirCount-1)
trainingSet.append(join(mypath,d,f));
numPics = numPics + 1
print "Training set..."
print trainingSet
print "Labels..."
print labelNames
print "Indexes..."
print labelIndexes
print "Lookup..."
print nameLookup
samples = []
for i in range(0, numPics):
img = cv2.imread(trainingSet[i])
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
samples.append(gray);
npArray = np.array(samples)
shapedArray = npArray.reshape(-1,400).astype(np.float32);
# Initiate kNN, train the data, then test it with test data for k=1
knn = cv2.ml.KNearest_create()
knn.train(shapedArray, cv2.ml.ROW_SAMPLE, np.array(labelIndexes))
lastTrainer = None
def CheckOcr(img):
global knn, nameLookup, args, lastTrainer
size = (20,20)
test_gray = cv2.resize(img,size,interpolation=cv2.INTER_LINEAR)
    if args.train and not np.array_equal(img, lastTrainer):
cv2.imwrite("Pictures/char" + str(time.time()) + ".png", test_gray)
lastTrainer = img
imgArr = np.array(test_gray).astype(np.float32)
sample = imgArr.reshape(-1,400).astype(np.float32)
ret,result,neighbours,dist = knn.findNearest(sample,k=5)
print ret, result, neighbours, dist
if nameLookup[ret] is not None:
print "Match: " + nameLookup[ret]
return nameLookup[ret]
else:
return "error"
def FrameReader():
global frame_holder
print "Starting frame holder..."
t = threading.currentThread()
while getattr(t, "do_run", True):
frame = vs.read()
frame = imutils.resize(frame, width=400)
cv2.flip(frame,1,frame)
frame_holder = frame
time.sleep(.03);
def Spell(spell):
#Invoke IoT (or any other) actions here
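    # NOTE: the early return on the next line short-circuits this function, so the
    # spell-specific branches below are effectively disabled placeholders.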
return
if (spell=="center"):
print "trinket_pin trigger"
elif (spell=="circle"):
print "switch_pin OFF"
print "nox_pin OFF"
print "incendio_pin ON"
elif (spell=="eight"):
print "switch_pin ON"
print "nox_pin OFF"
print "incendio_pin OFF"
elif (spell=="left"):
print "switch_pin OFF"
print "nox_pin ON"
print "incendio_pin OFF"
elif (spell=="square"):
None
elif (spell=="swish"):
None
elif (spell=="tee"):
None
elif (spell=="triangle"):
None
elif (spell=="zee"):
None
print "CAST: %s" %spell
def GetPoints(image):
    if not args.circles:
p0 = cv2.goodFeaturesToTrack(image, 5, .01, 30)
else:
p0 = cv2.HoughCircles(image,cv2.HOUGH_GRADIENT,3,50,param1=240,param2=8,minRadius=2,maxRadius=10)
if p0 is not None:
p0.shape = (p0.shape[1], 1, p0.shape[2])
p0 = p0[:,:,0:2]
return p0;
def ProcessImage():
global frame_holder
frame = frame_holder.copy()
frame_gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
th, frame_gray = cv2.threshold(frame_gray, 230, 255, cv2.THRESH_BINARY);
return frame_gray, frame
def FindWand():
global old_frame,old_gray,p0,mask, line_mask, run_request
try:
last = time.time()
t = threading.currentThread()
while getattr(t, "do_run", True):
now = time.time()
if run_request:
old_gray, old_frame = ProcessImage()
p0 = GetPoints(old_gray)
if p0 is not None:
mask = np.zeros_like(old_frame)
line_mask = np.zeros_like(old_gray)
run_request = False
last = time.time()
time.sleep(.3)
except cv2.error as e:
None
except:
e = sys.exc_info()[1]
#print "Error: %s" % e
def TrackWand():
global old_frame,old_gray,p0,mask, line_mask, color, frame, active, run_request
print "Starting wand tracking..."
color = (0,0,255)
# Create a mask image for drawing purposes
noPt = 0
while True:
try:
active = False
if p0 is not None:
active = True;
frame_gray, frame = ProcessImage();
cv2.imshow("Original", frame_gray)
# calculate optical flow
newPoints = False
if p0 is not None and len(p0) > 0:
noPt = 0
try:
if old_gray is not None and frame_gray is not None:
p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
newPoints = True
except cv2.error as e:
None
except:
print "."
continue
else:
noPt = noPt + 1
if noPt > 10:
try:
im2, contours,hierarchy = cv2.findContours(line_mask.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
cnt = contours[0]
x,y,w,h = cv2.boundingRect(cnt)
crop = line_mask[y-10:y+h+10,x-30:x+w+30]
result = CheckOcr(crop);
cv2.putText(line_mask, result, (0,50), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255,255,255))
Spell(result)
                            if line_mask is not None:
cv2.imshow("Raspberry Potter", line_mask)
line_mask = np.zeros_like(line_mask)
print ""
finally:
noPt = 0
run_request = True
if newPoints:
# Select good points
good_new = p1[st==1]
good_old = p0[st==1]
# draw the tracks
for i,(new,old) in enumerate(zip(good_new,good_old)):
a,b = new.ravel()
c,d = old.ravel()
cv2.line(line_mask, (a,b),(c,d),(255,255,255), 10)
if line_mask is not None:
cv2.imshow("Raspberry Potter", line_mask)
else:
if frame is not None:
cv2.imshow("Original", frame)
run_request = True
time.sleep(.3)
# Now update the previous frame and previous points
old_gray = frame_gray.copy()
p0 = good_new.reshape(-1,1,2)
except IndexError:
run_request = True
except cv2.error as e:
None
#print sys.exc_info()
except TypeError as e:
None
print "Type error."
exc_type, exc_obj, exc_tb = sys.exc_info()
print(exc_type, exc_tb.tb_lineno)
except KeyboardInterrupt as e:
raise e
except:
None
#print sys.exc_info()
#print "Tracking Error: %s" % e
key = cv2.waitKey(10)
if key in [27, ord('Q'), ord('q')]: # exit on ESC
cv2.destroyAllWindows()
break
try:
TrainOcr()
t = Thread(target=FrameReader)
t.do_run = True
t.start()
find = Thread(target=FindWand)
find.do_run = True
find.start()
print "START incendio_pin ON and set switch off if video is running"
time.sleep(2)
TrackWand()
except KeyboardInterrupt:
print("Shutting down...")
finally:
t.do_run = False
find.do_run = False
t.join()
find.join()
cv2.destroyAllWindows()
vs.stop()
sys.exit(1)
|
__init__.py
|
#!/usr/bin/python3
# -*- coding: UTF-8 -*-
from __future__ import absolute_import
# Operates on sound fragments consisting of signed integer samples 8, 16
# or 32 bits wide, stored in Python strings.
import audioop
from contextlib import contextmanager
from ctypes import CFUNCTYPE, c_char_p, c_int, cdll
from threading import Thread
import speech_recognition as sr
import pyaudio # Provides Python bindings for PortAudio, the cross platform audio API
from dragonfire import VirtualAssistant
from dragonfire.sr.deepspeech.config import ConfigDeepSpeech
from dragonfire.sr.deepspeech.server import SpeechServerMain
from dragonfire.sr.exceptions import UnknownSpeechRecognitionMode
import numpy as np
CHUNK = 8000 # Frames read from the audio stream per chunk (0.5 s at 16 kHz)
FORMAT = pyaudio.paInt16 # Data format
CHANNELS = 1 # Number of channels
RATE = 16000 # Bit Rate of audio stream / Frame Rate
THRESHOLD = 1000 # RMS threshold for detecting a speech stimulus
LISTENING = False
class SpeechRecognizer():
def __init__(self, mode):
# logging.basicConfig(level=logging.INFO)
self.__class__.finished = False
self.modes = ["deepspeech", 'gspeech']
if mode not in self.modes:
raise UnknownSpeechRecognitionMode();
else:
self.mode = mode
        if self.mode == 'gspeech':
            self.silence_detection = 3
        else:
            self.silence_detection = 1
        self.recognizer = sr.Recognizer()  # used by the 'gspeech' branch in recognize()
@classmethod
def set_finished(cls, finished):
cls.finished = finished
def reset(self):
self.__class__.finished = False
def recognize(self, her):
with noalsaerr():
p = pyaudio.PyAudio() # Create a PyAudio session
# Create a stream
stream = p.open(
format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
output=True,
frames_per_buffer=CHUNK)
try:
data = stream.read(CHUNK) # Get first data frame from the microphone
# Loop over the frames of the audio / data chunks
audio = None
# print("START LISTENNING")
while data != '':
rms = audioop.rms(data, 2) # Calculate Root Mean Square of current chunk
if rms >= THRESHOLD: # If Root Mean Square value is greater than THRESHOLD constant
audio = data
silence_counter = 0 # Define silence counter
# While silence counter value less than SILENCE_DETECTION constant
while silence_counter < self.silence_detection:
data = stream.read(CHUNK) # Read a new chunk from the stream
if LISTENING:
stream.write(data, CHUNK)
audio = audio + data
rms = audioop.rms(data, 2) # Calculate Root Mean Square of current chunk again
if rms < THRESHOLD: # If Root Mean Square value is less than THRESHOLD constant
silence_counter += 1 # Then increase silence counter
else: # Else
silence_counter = 0 # Assign zero value to silence counter
# print("Analyzing...")
stream.stop_stream()
if self.mode == 'deepspeech':
audio = np.fromstring(audio, dtype=np.int16) # Fix data type
com = SpeechServerMain.ds.stt(audio, RATE)
stream.start_stream()
# print(com)
t = Thread(target=her.command, args=(com,))
t.start()
elif self.mode == 'gspeech':
audio_data = sr.AudioData(audio, RATE, p.get_sample_size(FORMAT))
try:
com = self.recognizer.recognize_google(audio_data)
print(com)
t = Thread(target=her.command, args=(com,))
t.start()
except sr.UnknownValueError:
# print("Google Speech Recognition could not understand audio")
pass
except sr.RequestError as e:
print("Could not request results from Google Speech Recognition service; {0}".format(e))
stream.start_stream()
else:
break
self.reset()
data = stream.read(CHUNK) # Read a new chunk from the stream
if LISTENING:
stream.write(data, CHUNK)
except KeyboardInterrupt:
stream.stop_stream()
stream.close()
p.terminate()
# self.loop.quit()
raise KeyboardInterrupt
ERROR_HANDLER_FUNC = CFUNCTYPE(None, c_char_p, c_int, c_char_p, c_int, c_char_p)
def py_error_handler(filename, line, function, err, fmt):
pass
c_error_handler = ERROR_HANDLER_FUNC(py_error_handler)
@contextmanager
def noalsaerr():
asound = cdll.LoadLibrary('libasound.so')
asound.snd_lib_error_set_handler(c_error_handler)
yield
asound.snd_lib_error_set_handler(None)
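# Illustrative sketch (not part of the module): using ``noalsaerr`` to keep ALSA's
# stderr warnings quiet while probing audio devices with PyAudio. Assumes
# libasound.so is available, as the context manager above requires.
if __name__ == '__main__':
    with noalsaerr():
        pa = pyaudio.PyAudio()
        print("Visible audio devices:", pa.get_device_count())
        pa.terminate()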
|
AUTO (another copy).py
|
'''
NOTES
Goal: Make nn take the second-best from the previous prediction.
Result:
THIS ALSO SEEMS TO BE A BUST. KEEPS GETTING STUCK IN THESE LOOPS OF (predict; ctrl-z), ETC.
'''
# import car
import cv2
import numpy as np
import os
# import serial
import socket
import threading
import time
from imutils.object_detection import non_max_suppression
from keras.layers import Dense, Activation
from keras.models import Sequential
import keras.models
dir_log = ['Forward']
SIGMA = 0.33
stop_classifier = cv2.CascadeClassifier('cascade_xml/stop_sign.xml')
timestr = time.strftime('%Y%m%d_%H%M%S')
class RCDriver(object):
def steer(self, prediction):
# FORWARD
        if np.all(prediction == [ 0., 0., 1., 0.]):  # 4-class one-hot, matching predict()
# car.forward(150)
# car.pause(300)
time.sleep(0.3)
dir_log.append('Forward')
print 'Forward'
# FORWARD-LEFT
        elif np.all(prediction == [ 1., 0., 0., 0.]):
# car.left(300)
# car.forward_left(200)
# car.left(700)
# car.pause(200)
time.sleep(0.2)
dir_log.append('Left')
print 'Left'
# FORWARD-RIGHT
        elif np.all(prediction == [ 0., 1., 0., 0.]):
# car.right(300)
# car.forward_right(200)
# car.right(700)
# car.pause(200)
time.sleep(0.2)
dir_log.append('Right')
print 'Right'
    def stop(self):
        print '* * * STOPPING! * * *'
        # car.pause(5000)  # 'car' import is commented out above, so skip the hardware pause
rcdriver = RCDriver()
class ObjectDetection(object):
global rcdriver
global stop_classifier
def detect(self, cascade_classifier, gray_image, image):
# STOP SIGN
stop_sign_detected = cascade_classifier.detectMultiScale(
gray_image,
scaleFactor=1.1,
minNeighbors=10,
minSize=(50, 50),
maxSize=(55, 55))
# Draw a rectangle around stop sign
for (x_pos, y_pos, width, height) in stop_sign_detected:
cv2.rectangle(image, (x_pos+5, y_pos+5), (x_pos+width-5, y_pos+height-5), (0, 0, 255), 2)
cv2.putText(image, 'STOP SIGN', (x_pos, y_pos-10), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 255), 2)
# Execute the full stop
if np.any(stop_sign_detected):
rcdriver.stop()
# PEDESTRIAN
hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
orig = image.copy()
# Look for predestrians in the image
(rects, weights) = hog.detectMultiScale(image, winStride=(4, 4),
padding=(8, 8), scale=1.05)
# Draw the ORIGINAL bounding boxes
for (x, y, w, h) in rects:
cv2.rectangle(orig, (x, y), (x + w, y + h), (0, 0, 255), 2)
# Apply 'non-maxima suppression' to the bounding boxes using a fairly large overlap threshold to try to maintain overlapping
# boxes that are still people
rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
pick = non_max_suppression(rects, probs=None, overlapThresh=0.65)
# Draw the FINAL bounding boxes
for (xA, yA, xB, yB) in pick:
cv2.rectangle(image, (xA, yA), (xB, yB), (0, 255, 0), 2)
cv2.putText(image, 'PEDESTRIAN', (xA, yA-10), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 255, 0), 2)
obj_detection = ObjectDetection()
class TrustButVerify(object):
global dir_log
def __init__(self):
# Arbitrarily designating a 'corner' as some % of width from either edge (e.g. 15%)
self.corner_pct = .40
def scan_for_signal(self, filtered_img):
# Lower Left and Right corners
last_row = filtered_img[-1]
img_total_width = len(last_row)
        img_corner_width = int(img_total_width * self.corner_pct)  # slice indices must be integers
        left_corner = last_row[ : img_corner_width + 1]
        right_corner = last_row[ -img_corner_width : ]
# GOAL: Need a sum of 255 in both corners, which means at least the edge of a lane marker is visible in a corner
# If either corner < 255, then return False to activate ctrl-z mode
if sum(left_corner) < 255 or sum(right_corner) < 255:
print '\nSIGNAL IN ONE CORNER NOT PRESENT'
return False
return True
def ctrl_z(self):
print '< < < CTRL-Z MODE > > >'
last_dir = dir_log[-1]
# Forward -> Reverse
if last_dir == 'Forward':
# car.reverse(200)
# car.pause(500)
time.sleep(0.5)
print '< REVERSE >\n'
# Left -> Reverse-Left
elif last_dir == 'Left':
# car.left(300)
# car.reverse_left(275)
# car.left(700)
# car.pause(500)
time.sleep(0.5)
print '< REVERSE-LEFT >\n'
# Right -> Reverse-Right
elif last_dir == 'Right':
# car.right(300)
# car.reverse_right(275)
# car.right(700)
# car.pause(500)
time.sleep(0.5)
print '< REVERSE-RIGHT >\n'
return
TBV = TrustButVerify()
class NeuralNetwork(object):
global stop_classifier
global timestr
def __init__(self, receiving=False, piVideoObject=None):
self.receiving = receiving
self.model = keras.models.load_model('nn_h5/nn.h5')
# PiVideoStream class object is now here.
self.piVideoObject = piVideoObject
self.rcdriver = RCDriver()
print 'NeuralNetwork init OK'
self.fetch()
def auto_canny(self, blurred):
# Compute the median of the single channel pixel intensities
global SIGMA
v = np.median(blurred)
# Apply automatic Canny edge detection using the computed median of the image
lower = int(max(0, (1.0 - SIGMA) * v))
upper = int(min(255, (1.0 + SIGMA) * v))
edged = cv2.Canny(blurred, lower, upper)
return edged
def preprocess(self, frame):
image_array = frame.reshape(1, 38400).astype(np.float32)
image_array = image_array / 255.
return image_array
def predict(self, image):
image_array = self.preprocess(image)
y_hat = self.model.predict(image_array)
# First choice
i_max_first = np.argmax(y_hat)
y_hat_final_first = np.zeros((1,4))
np.put(y_hat_final_first, i_max_first, 1)
# Need to convert y_hat to a list to sort and find the second best pred.
y_hat_list = []
for each in y_hat[0]:
y_hat_list.append(each)
# Second choice
i_max_second = np.argsort(y_hat_list)[::-1][1]
y_hat_final_second = np.zeros((1,4))
np.put(y_hat_final_second, i_max_second, 1)
first_choice_pred = y_hat_final_first[0]
second_choice_pred = y_hat_final_second[0]
return first_choice_pred, second_choice_pred, y_hat
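    # Worked example (illustrative): if y_hat were [[0.1, 0.2, 0.6, 0.1]], the first
    # choice one-hot is [0., 0., 1., 0.] (argmax index 2) and the second choice
    # one-hot is [0., 1., 0., 0.] (second-highest probability, index 1).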
def fetch(self):
frame = 0
second_best = None
previous_probas = None
pred_rank = None
while self.receiving:
# There's a chance that the Main thread can get to this point before the New thread begins streaming images.
# To account for this, we create the jpg variable but set to None, and keep checking until it actually has something.
jpg = None
while jpg is None:
jpg = self.piVideoObject.frame
gray = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.IMREAD_GRAYSCALE)
image = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.IMREAD_UNCHANGED)
# Object detection
obj_detection.detect(stop_classifier, gray, image)
# Lower half of the grayscale image
roi = gray[120:240, :]
# Apply GuassianBlur (reduces noise)
blurred = cv2.GaussianBlur(roi, (3, 3), 0)
# Apply Canny filter
auto = self.auto_canny(blurred)
# Show streaming images
cv2.imshow('Original', image)
cv2.imshow('What the model sees', auto)
# *** NEW FEATURE: Trust but verify (TBV) ***
# Check for signal in lower corners of image (boolean). If True, then s'all good. If Not, then...
if not TBV.scan_for_signal(auto):
if frame == 0:
continue
# TBV.ctrl_z() takes car back one step, and 'prediction' is now the second_best from previous run.
TBV.ctrl_z()
prediction = second_best
probas = previous_probas
pred_rank = 'second'
# If TBV.scan_for_signal returned True, then all is well. ctrl_z_mode is False, and model makes prediciton on argmax proba.
else:
first_choice, second_choice, probas = self.predict(auto)
second_best = second_choice # second_choice from this run is assigned to global var, in case it's needed in next run.
previous_probas = probas
prediction = first_choice
pred_rank = 'first'
# Save frame and prediction record for debugging research
prediction_english = None
prediction_english_proba = None
proba_left, proba_right, proba_forward, proba_backward = probas[0]
if np.all(prediction == [ 0., 0., 1., 0.]):
prediction_english = 'FORWARD'
prediction_english_proba = proba_forward
elif np.all(prediction == [ 1., 0., 0., 0.]):
prediction_english = 'LEFT'
prediction_english_proba = proba_left
elif np.all(prediction == [ 0., 1., 0., 0.]):
prediction_english = 'RIGHT'
prediction_english_proba = proba_right
# Text on saved image
cv2.putText(gray, "Prediction ({}): {}, {:>05}".format(pred_rank, prediction_english, prediction_english_proba), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, .45, (255, 255, 0), 1)
cv2.putText(gray, "Forward: {}".format(proba_forward), (10, 40), cv2.FONT_HERSHEY_SIMPLEX, .45, (255, 255, 0), 1)
cv2.putText(gray, "Left: {}".format(proba_left), (10, 60), cv2.FONT_HERSHEY_SIMPLEX, .45, (255, 255, 0), 1)
cv2.putText(gray, "Right: {}".format(proba_right), (10, 80), cv2.FONT_HERSHEY_SIMPLEX, .45, (255, 255, 0), 1)
# cv2.imwrite('test_frames_temp/frame{:>05}.jpg'.format(frame), gray)
frame += 1
# Send prediction to driver to tell it how to steer
self.rcdriver.steer(prediction)
if cv2.waitKey(1) & 0xFF == ord('q'):
self.stop()
cv2.destroyAllWindows()
class PiVideoStream(object):
def __init__(self):
self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# self.server_socket.bind(('192.168.1.66', 8000)) # The IP address of your computer (Paul's MacBook Air). This script should run before the one on the Pi.
self.server_socket.bind(('172.14.1.126', 8000)) # The IP address of your computer (Paul's MacBook Air). This script should run before the one on the Pi.
print 'Listening...'
self.server_socket.listen(0)
# Accept a single connection ('rb' is 'read binary')
self.connection = self.server_socket.accept()[0].makefile('rb')
# initialize the frame and the variable used to indicate
# if the thread should be stopped
self.frame = None
self.stopped = False
self.stream_bytes = ' '
self.start()
def start(self):
# start the thread to read frames from the video stream
print 'Starting PiVideoStream thread...'
print ' \"Hold on to your butts!\" '
# Start a new thread
t = threading.Thread(target=self.update, args=())
t.daemon=True
t.start()
print '...thread running'
# Main thread diverges from the new thread and activates the neural_network
# The piVideoObject argument ('self') passes the PiVideoStream class object to NeuralNetwork.
NeuralNetwork(receiving=True, piVideoObject=self)
def update(self):
while True:
self.stream_bytes += self.connection.read(1024)
first = self.stream_bytes.find('\xff\xd8')
last = self.stream_bytes.find('\xff\xd9')
if first != -1 and last != -1:
self.frame = self.stream_bytes[first:last + 2]
self.stream_bytes = self.stream_bytes[last + 2:]
def read(self):
# return the frame most recently read
return self.frame
if __name__ == '__main__':
try:
# Create an instance of PiVideoStream class
video_stream = PiVideoStream()
except KeyboardInterrupt:
        # car.stop()  # 'car' import is commented out above, so there is no hardware to stop
# Rename the folder that collected all of the test frames. Then make a new folder to collect next round of test frames.
os.rename( './test_frames_temp', './test_frames_SAVED/test_frames_{}'.format(timestr))
os.makedirs('./test_frames_temp')
print '\nTerminating...\n'
        # Close the video_stream connection (do not create a new PiVideoStream here).
        video_stream.connection.close()
print '\nDone.\n'
|
test_callbacks.py
|
import os
import multiprocessing
import numpy as np
import pytest
from numpy.testing import assert_allclose
from csv import reader
from csv import Sniffer
import shutil
from collections import defaultdict
from keras import optimizers
from keras import initializers
from keras import callbacks
from keras.models import Sequential, Model
from keras.layers import Input, Dense, Dropout, add, dot, Lambda, Layer
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import GlobalAveragePooling1D
from keras.layers import GlobalAveragePooling2D
from keras.layers import BatchNormalization
from keras.utils.test_utils import get_test_data
from keras.utils.generic_utils import to_list
from keras.utils.generic_utils import unpack_singleton
from keras import backend as K
from keras.utils import np_utils
try:
from unittest.mock import patch
except ImportError:
from mock import patch
input_dim = 2
num_hidden = 4
num_classes = 2
batch_size = 5
train_samples = 20
test_samples = 20
def data_generator(x, y, batch_size):
x = to_list(x)
y = to_list(y)
max_batch_index = len(x[0]) // batch_size
i = 0
while 1:
x_batch = [array[i * batch_size: (i + 1) * batch_size] for array in x]
x_batch = unpack_singleton(x_batch)
y_batch = [array[i * batch_size: (i + 1) * batch_size] for array in y]
y_batch = unpack_singleton(y_batch)
yield x_batch, y_batch
i += 1
i = i % max_batch_index
# Changing the default arguments of get_test_data.
def get_data_callbacks(num_train=train_samples,
num_test=test_samples,
input_shape=(input_dim,),
classification=True,
num_classes=num_classes):
return get_test_data(num_train=num_train,
num_test=num_test,
input_shape=input_shape,
classification=classification,
num_classes=num_classes)
class Counter(callbacks.Callback):
"""Counts the number of times each callback method was run.
# Arguments
method_counts: dict, contains the counts of time each callback method was
run.
"""
def __init__(self):
self.method_counts = defaultdict(int)
methods_to_count = [
'on_batch_begin', 'on_batch_end', 'on_epoch_begin', 'on_epoch_end',
'on_train_batch_begin', 'on_train_batch_end',
'on_test_batch_begin', 'on_test_batch_end',
'on_predict_batch_begin', 'on_predict_batch_end',
'on_train_begin', 'on_train_end', 'on_predict_begin', 'on_predict_end',
'on_test_begin', 'on_test_end',
]
for method_name in methods_to_count:
setattr(self, method_name,
self.wrap_with_counts(method_name, getattr(self, method_name)))
def wrap_with_counts(self, method_name, method):
def _call_and_count(*args, **kwargs):
self.method_counts[method_name] += 1
return method(*args, **kwargs)
return _call_and_count
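# Usage note (illustrative): an instance of Counter is passed via callbacks=[counter]
# to fit/evaluate/predict (and their generator variants); counter.method_counts then
# records how many times each hook fired, as the tests below assert.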
class TestCallbackCounts(object):
def _check_counts(self, counter, expected_counts):
"""Checks that the counts registered by `counter` are those expected."""
for method_name, expected_count in expected_counts.items():
count = counter.method_counts[method_name]
assert count == expected_count, \
'For method {}: expected {}, got: {}'.format(
method_name, expected_count, count)
def _get_model(self):
layers = [
Dense(10, activation='relu', input_dim=input_dim),
Dense(num_classes, activation='softmax')
]
model = Sequential(layers=layers)
model.compile(optimizer='adam', loss='binary_crossentropy')
return model
def test_callback_hooks_are_called_in_fit(self):
np.random.seed(1337)
(X_train, y_train), (X_test, y_test) = get_data_callbacks(num_train=10,
num_test=4)
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
model = self._get_model()
counter = Counter()
model.fit(X_train, y_train, validation_data=(X_test, y_test),
batch_size=2, epochs=5, callbacks=[counter])
self._check_counts(
counter, {
'on_batch_begin': 25,
'on_batch_end': 25,
'on_epoch_begin': 5,
'on_epoch_end': 5,
'on_predict_batch_begin': 0,
'on_predict_batch_end': 0,
'on_predict_begin': 0,
'on_predict_end': 0,
'on_test_batch_begin': 10,
'on_test_batch_end': 10,
'on_test_begin': 5,
'on_test_end': 5,
'on_train_batch_begin': 25,
'on_train_batch_end': 25,
'on_train_begin': 1,
'on_train_end': 1,
})
def test_callback_hooks_are_called_in_evaluate(self):
np.random.seed(1337)
(_, _), (X_test, y_test) = get_data_callbacks(num_test=10)
y_test = np_utils.to_categorical(y_test)
model = self._get_model()
counter = Counter()
model.evaluate(X_test, y_test, batch_size=2, callbacks=[counter])
self._check_counts(
counter, {
'on_test_batch_begin': 5,
'on_test_batch_end': 5,
'on_test_begin': 1,
'on_test_end': 1,
'on_batch_begin': 0,
'on_batch_end': 0,
'on_epoch_begin': 0,
'on_epoch_end': 0,
'on_predict_batch_begin': 0,
'on_predict_batch_end': 0,
'on_predict_begin': 0,
'on_predict_end': 0,
'on_train_batch_begin': 0,
'on_train_batch_end': 0,
'on_train_begin': 0,
'on_train_end': 0,
})
def test_callback_hooks_are_called_in_predict(self):
np.random.seed(1337)
(_, _), (X_test, _) = get_data_callbacks(num_test=10)
model = self._get_model()
counter = Counter()
model.predict(X_test, batch_size=2, callbacks=[counter])
self._check_counts(
counter, {
'on_predict_batch_begin': 5,
'on_predict_batch_end': 5,
'on_predict_begin': 1,
'on_predict_end': 1,
'on_batch_begin': 0,
'on_batch_end': 0,
'on_epoch_begin': 0,
'on_epoch_end': 0,
'on_test_batch_begin': 0,
'on_test_batch_end': 0,
'on_test_begin': 0,
'on_test_end': 0,
'on_train_batch_begin': 0,
'on_train_batch_end': 0,
'on_train_begin': 0,
'on_train_end': 0,
})
def test_callback_hooks_are_called_in_fit_generator(self):
np.random.seed(1337)
(X_train, y_train), (X_test, y_test) = get_data_callbacks(num_train=10,
num_test=4)
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
train_generator = data_generator(X_train, y_train, batch_size=2)
validation_generator = data_generator(X_test, y_test, batch_size=2)
model = self._get_model()
counter = Counter()
model.fit_generator(train_generator, steps_per_epoch=len(X_train) // 2,
epochs=5, validation_data=validation_generator,
validation_steps=len(X_test) // 2, callbacks=[counter])
self._check_counts(
counter, {
'on_batch_begin': 25,
'on_batch_end': 25,
'on_epoch_begin': 5,
'on_epoch_end': 5,
'on_predict_batch_begin': 0,
'on_predict_batch_end': 0,
'on_predict_begin': 0,
'on_predict_end': 0,
'on_test_batch_begin': 10,
'on_test_batch_end': 10,
'on_test_begin': 5,
'on_test_end': 5,
'on_train_batch_begin': 25,
'on_train_batch_end': 25,
'on_train_begin': 1,
'on_train_end': 1,
})
def test_callback_hooks_are_called_in_evaluate_generator(self):
np.random.seed(1337)
(_, _), (X_test, y_test) = get_data_callbacks(num_test=10)
y_test = np_utils.to_categorical(y_test)
model = self._get_model()
counter = Counter()
model.evaluate_generator(data_generator(X_test, y_test, batch_size=2),
steps=len(X_test) // 2, callbacks=[counter])
self._check_counts(
counter, {
'on_test_batch_begin': 5,
'on_test_batch_end': 5,
'on_test_begin': 1,
'on_test_end': 1,
'on_batch_begin': 0,
'on_batch_end': 0,
'on_epoch_begin': 0,
'on_epoch_end': 0,
'on_predict_batch_begin': 0,
'on_predict_batch_end': 0,
'on_predict_begin': 0,
'on_predict_end': 0,
'on_train_batch_begin': 0,
'on_train_batch_end': 0,
'on_train_begin': 0,
'on_train_end': 0,
})
def test_callback_hooks_are_called_in_predict_generator(self):
np.random.seed(1337)
(_, _), (X_test, _) = get_data_callbacks(num_test=10)
def data_generator(x, batch_size):
x = to_list(x)
max_batch_index = len(x[0]) // batch_size
i = 0
while 1:
x_batch = [
array[i * batch_size: (i + 1) * batch_size] for array in x]
x_batch = unpack_singleton(x_batch)
yield x_batch
i += 1
i = i % max_batch_index
model = self._get_model()
counter = Counter()
model.predict_generator(data_generator(X_test, batch_size=2),
steps=len(X_test) // 2, callbacks=[counter])
self._check_counts(
counter, {
'on_predict_batch_begin': 5,
'on_predict_batch_end': 5,
'on_predict_begin': 1,
'on_predict_end': 1,
'on_batch_begin': 0,
'on_batch_end': 0,
'on_epoch_begin': 0,
'on_epoch_end': 0,
'on_test_batch_begin': 0,
'on_test_batch_end': 0,
'on_test_begin': 0,
'on_test_end': 0,
'on_train_batch_begin': 0,
'on_train_batch_end': 0,
'on_train_begin': 0,
'on_train_end': 0,
})
def test_callback_list_methods(self):
counter = Counter()
callback_list = callbacks.CallbackList([counter])
batch = 0
callback_list.on_test_batch_begin(batch)
callback_list.on_test_batch_end(batch)
callback_list.on_predict_batch_begin(batch)
callback_list.on_predict_batch_end(batch)
self._check_counts(
counter, {
'on_test_batch_begin': 1,
'on_test_batch_end': 1,
'on_predict_batch_begin': 1,
'on_predict_batch_end': 1,
'on_predict_begin': 0,
'on_predict_end': 0,
'on_batch_begin': 0,
'on_batch_end': 0,
'on_epoch_begin': 0,
'on_epoch_end': 0,
'on_test_begin': 0,
'on_test_end': 0,
'on_train_batch_begin': 0,
'on_train_batch_end': 0,
'on_train_begin': 0,
'on_train_end': 0,
})
def test_TerminateOnNaN():
np.random.seed(1337)
(X_train, y_train), (X_test, y_test) = get_data_callbacks()
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
cbks = [callbacks.TerminateOnNaN()]
model = Sequential()
initializer = initializers.Constant(value=1e5)
for _ in range(5):
model.add(Dense(num_hidden, input_dim=input_dim, activation='relu',
kernel_initializer=initializer))
model.add(Dense(num_classes, activation='linear'))
model.compile(loss='mean_squared_error',
optimizer='rmsprop')
# case 1 fit
history = model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=20)
loss = history.history['loss']
assert len(loss) == 1
assert loss[0] == np.inf
history = model.fit_generator(data_generator(X_train, y_train, batch_size),
len(X_train),
validation_data=(X_test, y_test),
callbacks=cbks,
epochs=20)
loss = history.history['loss']
assert len(loss) == 1
assert loss[0] == np.inf or np.isnan(loss[0])
def test_stop_training_csv(tmpdir):
np.random.seed(1337)
fp = str(tmpdir / 'test.csv')
(X_train, y_train), (X_test, y_test) = get_data_callbacks()
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
cbks = [callbacks.TerminateOnNaN(), callbacks.CSVLogger(fp)]
model = Sequential()
for _ in range(5):
model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(num_classes, activation='linear'))
model.compile(loss='mean_squared_error',
optimizer='rmsprop')
def data_generator():
i = 0
max_batch_index = len(X_train) // batch_size
tot = 0
while 1:
if tot > 3 * len(X_train):
yield (np.ones([batch_size, input_dim]) * np.nan,
np.ones([batch_size, num_classes]) * np.nan)
else:
yield (X_train[i * batch_size: (i + 1) * batch_size],
y_train[i * batch_size: (i + 1) * batch_size])
i += 1
tot += 1
i = i % max_batch_index
history = model.fit_generator(data_generator(),
len(X_train) // batch_size,
validation_data=(X_test, y_test),
callbacks=cbks,
epochs=20)
loss = history.history['loss']
assert len(loss) > 1
assert loss[-1] == np.inf or np.isnan(loss[-1])
values = []
with open(fp) as f:
for x in reader(f):
values.append(x)
assert 'nan' in values[-1], 'The last epoch was not logged.'
os.remove(fp)
def test_ModelCheckpoint(tmpdir):
np.random.seed(1337)
filepath = str(tmpdir / 'checkpoint.h5')
(X_train, y_train), (X_test, y_test) = get_data_callbacks()
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
# case 1
monitor = 'val_loss'
save_best_only = False
mode = 'auto'
model = Sequential()
model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,
save_best_only=save_best_only, mode=mode)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
assert os.path.isfile(filepath)
os.remove(filepath)
# case 2
mode = 'min'
cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,
save_best_only=save_best_only, mode=mode)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
assert os.path.isfile(filepath)
os.remove(filepath)
# case 3
mode = 'max'
monitor = 'val_acc'
cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,
save_best_only=save_best_only, mode=mode)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
assert os.path.isfile(filepath)
os.remove(filepath)
# case 4
save_best_only = True
cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,
save_best_only=save_best_only, mode=mode)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
assert os.path.isfile(filepath)
os.remove(filepath)
# case 5
save_best_only = False
period = 2
mode = 'auto'
filepath = 'checkpoint.{epoch:02d}.h5'
cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,
save_best_only=save_best_only, mode=mode,
period=period)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=4)
assert os.path.isfile(filepath.format(epoch=2))
assert os.path.isfile(filepath.format(epoch=4))
assert not os.path.exists(filepath.format(epoch=1))
assert not os.path.exists(filepath.format(epoch=3))
os.remove(filepath.format(epoch=2))
os.remove(filepath.format(epoch=4))
assert not tmpdir.listdir()
def test_EarlyStopping():
np.random.seed(1337)
(X_train, y_train), (X_test, y_test) = get_data_callbacks()
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = Sequential()
model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
mode = 'max'
monitor = 'val_acc'
patience = 0
cbks = [callbacks.EarlyStopping(patience=patience, monitor=monitor, mode=mode)]
history = model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=20)
mode = 'auto'
monitor = 'val_acc'
patience = 2
cbks = [callbacks.EarlyStopping(patience=patience, monitor=monitor, mode=mode)]
history = model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=20)
def test_EarlyStopping_reuse():
np.random.seed(1337)
patience = 3
data = np.random.random((100, 1))
labels = np.where(data > 0.5, 1, 0)
model = Sequential((
Dense(1, input_dim=1, activation='relu'),
Dense(1, activation='sigmoid'),
))
model.compile(optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
stopper = callbacks.EarlyStopping(monitor='acc', patience=patience)
weights = model.get_weights()
hist = model.fit(data, labels, callbacks=[stopper], epochs=20)
assert len(hist.epoch) >= patience
# This should allow training to go for at least `patience` epochs
model.set_weights(weights)
hist = model.fit(data, labels, callbacks=[stopper], epochs=20)
assert len(hist.epoch) >= patience
def test_EarlyStopping_patience():
class DummyModel(object):
def __init__(self):
self.stop_training = False
def get_weights(self):
return []
def set_weights(self, weights):
pass
early_stop = callbacks.EarlyStopping(monitor='val_loss', patience=2)
early_stop.model = DummyModel()
losses = [0.0860, 0.1096, 0.1040, 0.1019]
# Should stop after epoch 3,
# as the loss has not improved after patience=2 epochs.
epochs_trained = 0
early_stop.on_train_begin()
for epoch in range(len(losses)):
epochs_trained += 1
early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
if early_stop.model.stop_training:
break
assert epochs_trained == 3
def test_EarlyStopping_baseline():
class DummyModel(object):
def __init__(self):
self.stop_training = False
def get_weights(self):
return []
def set_weights(self, weights):
pass
def baseline_tester(acc_levels):
early_stop = callbacks.EarlyStopping(monitor='val_acc', baseline=0.75,
patience=2)
early_stop.model = DummyModel()
epochs_trained = 0
early_stop.on_train_begin()
for epoch in range(len(acc_levels)):
epochs_trained += 1
early_stop.on_epoch_end(epoch, logs={'val_acc': acc_levels[epoch]})
if early_stop.model.stop_training:
break
return epochs_trained
acc_levels = [0.55, 0.76, 0.81, 0.81]
baseline_met = baseline_tester(acc_levels)
acc_levels = [0.55, 0.74, 0.81, 0.81]
baseline_not_met = baseline_tester(acc_levels)
# All epochs should run because baseline was met in second epoch
assert baseline_met == 4
# Baseline was not met by second epoch and should stop
assert baseline_not_met == 2
def test_EarlyStopping_final_weights():
class DummyModel(object):
def __init__(self):
self.stop_training = False
self.weights = -1
def get_weights(self):
return self.weights
def set_weights(self, weights):
self.weights = weights
def set_weight_to_epoch(self, epoch):
self.weights = epoch
early_stop = callbacks.EarlyStopping(monitor='val_loss', patience=2)
early_stop.model = DummyModel()
losses = [0.2, 0.15, 0.1, 0.11, 0.12]
epochs_trained = 0
early_stop.on_train_begin()
for epoch in range(len(losses)):
epochs_trained += 1
early_stop.model.set_weight_to_epoch(epoch=epoch)
early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
if early_stop.model.stop_training:
break
# The best configuration is in the epoch 2 (loss = 0.1000),
# so with patience=2 we need to end up at epoch 4
assert early_stop.model.get_weights() == 4
def test_EarlyStopping_final_weights_when_restoring_model_weights():
class DummyModel(object):
def __init__(self):
self.stop_training = False
self.weights = -1
def get_weights(self):
return self.weights
def set_weights(self, weights):
self.weights = weights
def set_weight_to_epoch(self, epoch):
self.weights = epoch
early_stop = callbacks.EarlyStopping(monitor='val_loss', patience=2,
restore_best_weights=True)
early_stop.model = DummyModel()
losses = [0.2, 0.15, 0.1, 0.11, 0.12]
# The best configuration is in the epoch 2 (loss = 0.1000).
epochs_trained = 0
early_stop.on_train_begin()
for epoch in range(len(losses)):
epochs_trained += 1
early_stop.model.set_weight_to_epoch(epoch=epoch)
early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
if early_stop.model.stop_training:
break
# The best configuration is in epoch 2 (loss = 0.1000),
# and while patience = 2, we're restoring the best weights,
# so we end up at the epoch with the best weights, i.e. epoch 2
assert early_stop.model.get_weights() == 2
def test_LearningRateScheduler():
np.random.seed(1337)
(X_train, y_train), (X_test, y_test) = get_data_callbacks()
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = Sequential()
model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
cbks = [callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=5)
assert (float(K.get_value(model.optimizer.lr)) - 0.2) < K.epsilon()
def test_ReduceLROnPlateau():
np.random.seed(1337)
(X_train, y_train), (X_test, y_test) = get_data_callbacks()
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
def make_model():
np.random.seed(1337)
model = Sequential()
model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=optimizers.SGD(lr=0.1),
metrics=['accuracy'])
return model
model = make_model()
# This should reduce the LR after the first epoch (due to high epsilon).
cbks = [callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1,
min_delta=10, patience=1, cooldown=5)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=5, verbose=2)
assert_allclose(float(K.get_value(model.optimizer.lr)), 0.01, atol=K.epsilon())
model = make_model()
cbks = [callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1,
min_delta=0, patience=1, cooldown=5)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=5, verbose=2)
assert_allclose(float(K.get_value(model.optimizer.lr)), 0.1, atol=K.epsilon())
def test_ReduceLROnPlateau_patience():
class DummyOptimizer(object):
def __init__(self):
self.lr = K.variable(1.0)
class DummyModel(object):
def __init__(self):
self.optimizer = DummyOptimizer()
reduce_on_plateau = callbacks.ReduceLROnPlateau(monitor='val_loss',
patience=2)
reduce_on_plateau.model = DummyModel()
losses = [0.0860, 0.1096, 0.1040]
lrs = []
for epoch in range(len(losses)):
reduce_on_plateau.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
lrs.append(K.get_value(reduce_on_plateau.model.optimizer.lr))
# The learning rates should be 1.0 except the last one
assert all([lr == 1.0 for lr in lrs[:-1]]) and lrs[-1] < 1.0
def test_ReduceLROnPlateau_backwards_compatibility():
import warnings
with warnings.catch_warnings(record=True) as ws:
reduce_on_plateau = callbacks.ReduceLROnPlateau(epsilon=1e-13)
# Check if warnings are disabled
if os.environ.get("PYTHONWARNINGS") != "ignore":
assert "`epsilon` argument is deprecated" in str(ws[0].message)
assert not hasattr(reduce_on_plateau, 'epsilon')
assert hasattr(reduce_on_plateau, 'min_delta')
assert reduce_on_plateau.min_delta == 1e-13
def test_CSVLogger(tmpdir):
np.random.seed(1337)
filepath = str(tmpdir / 'log.tsv')
sep = '\t'
(X_train, y_train), (X_test, y_test) = get_data_callbacks()
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
def make_model():
np.random.seed(1337)
model = Sequential()
model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=optimizers.SGD(lr=0.1),
metrics=['accuracy'])
return model
# case 1, create new file with defined separator
model = make_model()
cbks = [callbacks.CSVLogger(filepath, separator=sep)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
assert os.path.isfile(filepath)
with open(filepath) as csvfile:
dialect = Sniffer().sniff(csvfile.read())
assert dialect.delimiter == sep
del model
del cbks
# case 2, append data to existing file, skip header
model = make_model()
cbks = [callbacks.CSVLogger(filepath, separator=sep, append=True)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
# case 3, reuse of CSVLogger object
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=2)
import re
with open(filepath) as csvfile:
list_lines = csvfile.readlines()
for line in list_lines:
assert line.count(sep) == 4
assert len(list_lines) == 5
output = " ".join(list_lines)
assert len(re.findall('epoch', output)) == 1
os.remove(filepath)
assert not tmpdir.listdir()
@pytest.mark.parametrize('update_freq', ['batch', 'epoch', 9])
def test_TensorBoard(tmpdir, update_freq):
np.random.seed(np.random.randint(1, 1e7))
filepath = str(tmpdir / 'logs')
(X_train, y_train), (X_test, y_test) = get_data_callbacks()
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
class DummyStatefulMetric(Layer):
def __init__(self, name='dummy_stateful_metric', **kwargs):
super(DummyStatefulMetric, self).__init__(name=name, **kwargs)
self.stateful = True
self.state = K.variable(value=0, dtype='int32')
def reset_states(self):
pass
def __call__(self, y_true, y_pred):
return self.state
inp = Input((input_dim,))
hidden = Dense(num_hidden, activation='relu')(inp)
hidden = Dropout(0.1)(hidden)
hidden = BatchNormalization()(hidden)
output = Dense(num_classes, activation='softmax')(hidden)
model = Model(inputs=inp, outputs=output)
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy', DummyStatefulMetric()])
# we must generate new callbacks for each test, as they aren't stateless
def callbacks_factory(histogram_freq, embeddings_freq=1, write_images=True,
write_grads=True):
return [callbacks.TensorBoard(log_dir=filepath,
histogram_freq=histogram_freq,
write_images=write_images,
write_grads=write_grads,
embeddings_freq=embeddings_freq,
embeddings_layer_names=['dense_1'],
embeddings_data=X_test,
batch_size=5,
update_freq=update_freq)]
# fit without validation data
model.fit(X_train, y_train, batch_size=batch_size,
callbacks=callbacks_factory(histogram_freq=0, embeddings_freq=0),
epochs=2)
# fit with validation data and accuracy
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test),
callbacks=callbacks_factory(histogram_freq=0, write_images=False,
write_grads=False),
epochs=2)
# fit generator without validation data
train_generator = data_generator(X_train, y_train, batch_size)
model.fit_generator(train_generator, len(X_train), epochs=2,
callbacks=callbacks_factory(histogram_freq=0,
write_images=False,
write_grads=False,
embeddings_freq=0))
# fit generator with validation data and accuracy
train_generator = data_generator(X_train, y_train, batch_size)
model.fit_generator(train_generator, len(X_train), epochs=2,
validation_data=(X_test, y_test),
callbacks=callbacks_factory(histogram_freq=1,
write_images=False,
write_grads=False))
assert os.path.isdir(filepath)
shutil.rmtree(filepath)
assert not tmpdir.listdir()
@pytest.mark.skipif((K.backend() != 'tensorflow'),
reason='Requires TensorFlow backend')
def test_TensorBoard_histogram_freq_must_have_validation_data(tmpdir):
np.random.seed(np.random.randint(1, 1e7))
filepath = str(tmpdir / 'logs')
(X_train, y_train), (X_test, y_test) = get_data_callbacks()
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
inp = Input((input_dim,))
hidden = Dense(num_hidden, activation='relu')(inp)
hidden = Dropout(0.1)(hidden)
output = Dense(num_classes, activation='softmax')(hidden)
model = Model(inputs=inp, outputs=output)
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
# we must generate new callbacks for each test, as they aren't stateless
def callbacks_factory(histogram_freq, embeddings_freq=1, write_images=True,
write_grads=True):
return [callbacks.TensorBoard(log_dir=filepath,
histogram_freq=histogram_freq,
write_images=write_images,
write_grads=write_grads,
embeddings_freq=embeddings_freq,
embeddings_layer_names=['dense_1'],
embeddings_data=X_test,
batch_size=5)]
# fit without validation data should raise ValueError if histogram_freq > 0
with pytest.raises(ValueError) as raised_exception:
model.fit(X_train, y_train, batch_size=batch_size,
callbacks=callbacks_factory(histogram_freq=1), epochs=3)
assert 'validation_data must be provided' in str(raised_exception.value)
train_generator = data_generator(X_train, y_train, batch_size)
validation_generator = data_generator(X_test, y_test, batch_size)
# fit generator without validation data should raise ValueError if
# histogram_freq > 0
with pytest.raises(ValueError) as raised_exception:
model.fit_generator(train_generator,
len(X_train), epochs=2,
callbacks=callbacks_factory(histogram_freq=1,
write_images=False,
write_grads=False))
assert 'validation_data must be provided' in str(raised_exception.value)
# fit generator with validation data generator should raise ValueError if
# histogram_freq > 0
with pytest.raises(ValueError) as raised_exception:
model.fit_generator(train_generator, len(X_train), epochs=2,
validation_data=validation_generator,
validation_steps=1,
callbacks=callbacks_factory(histogram_freq=1,
write_images=False,
write_grads=False))
assert 'validation_data must be provided' in str(raised_exception.value)
def test_TensorBoard_multi_input_output(tmpdir):
np.random.seed(np.random.randint(1, 1e7))
filepath = str(tmpdir / 'logs')
(X_train, y_train), (X_test, y_test) = get_data_callbacks(
input_shape=(input_dim, input_dim))
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
inp1 = Input((input_dim, input_dim))
inp2 = Input((input_dim, input_dim))
inp_3d = add([inp1, inp2])
inp_2d = GlobalAveragePooling1D()(inp_3d)
# test a layer with a list of output tensors
inp_pair = Lambda(lambda x: x)([inp_3d, inp_2d])
hidden = dot(inp_pair, axes=-1)
hidden = Dense(num_hidden, activation='relu')(hidden)
hidden = Dropout(0.1)(hidden)
output1 = Dense(num_classes, activation='softmax')(hidden)
output2 = Dense(num_classes, activation='softmax')(hidden)
model = Model(inputs=[inp1, inp2], outputs=[output1, output2])
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
# we must generate new callbacks for each test, as they aren't stateless
def callbacks_factory(histogram_freq, embeddings_freq=1, write_images=True,
write_grads=True):
return [callbacks.TensorBoard(log_dir=filepath,
histogram_freq=histogram_freq,
write_images=write_images,
write_grads=write_grads,
embeddings_freq=embeddings_freq,
embeddings_layer_names=['dense_1'],
embeddings_data=[X_test] * 2,
batch_size=5)]
# fit without validation data
model.fit([X_train] * 2, [y_train] * 2, batch_size=batch_size,
callbacks=callbacks_factory(histogram_freq=0, embeddings_freq=0),
epochs=3)
# fit with validation data and accuracy
model.fit([X_train] * 2, [y_train] * 2, batch_size=batch_size,
validation_data=([X_test] * 2, [y_test] * 2),
callbacks=callbacks_factory(histogram_freq=1, write_images=False,
write_grads=False),
epochs=2)
train_generator = data_generator([X_train] * 2, [y_train] * 2, batch_size)
# fit generator without validation data
model.fit_generator(train_generator, len(X_train), epochs=2,
callbacks=callbacks_factory(histogram_freq=0,
embeddings_freq=0,
write_images=False,
write_grads=False))
# fit generator with validation data and accuracy
model.fit_generator(train_generator, len(X_train), epochs=2,
validation_data=([X_test] * 2, [y_test] * 2),
callbacks=callbacks_factory(histogram_freq=1,
write_images=False,
write_grads=False))
assert os.path.isdir(filepath)
shutil.rmtree(filepath)
assert not tmpdir.listdir()
def test_TensorBoard_convnet(tmpdir):
np.random.seed(np.random.randint(1, 1e7))
filepath = str(tmpdir / 'logs')
input_shape = (16, 16, 3)
(x_train, y_train), (x_test, y_test) = get_data_callbacks(
num_train=500,
num_test=200,
input_shape=input_shape)
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
model = Sequential([
Conv2D(filters=8, kernel_size=3,
activation='relu',
input_shape=input_shape),
MaxPooling2D(pool_size=2),
Conv2D(filters=4, kernel_size=(3, 3),
activation='relu', padding='same'),
BatchNormalization(),
GlobalAveragePooling2D(),
Dense(num_classes, activation='softmax')
])
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
tsb = callbacks.TensorBoard(log_dir=filepath, histogram_freq=1,
write_images=True, write_grads=True,
batch_size=16)
cbks = [tsb]
model.summary()
history = model.fit(x_train, y_train, epochs=2, batch_size=16,
validation_data=(x_test, y_test),
callbacks=cbks,
verbose=0)
assert os.path.isdir(filepath)
shutil.rmtree(filepath)
assert not tmpdir.listdir()
def test_TensorBoard_display_float_from_logs(tmpdir):
filepath = str(tmpdir / 'logs')
input_shape = (3,)
(x_train, y_train), _ = get_data_callbacks(num_train=10,
num_test=0,
input_shape=input_shape)
y_train = np_utils.to_categorical(y_train)
model = Sequential([
Dense(num_classes, activation='softmax')
])
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop')
class CustomCallback(callbacks.Callback):
def on_epoch_end(self, epoch, logs=None):
logs['test'] = 0.
tsb = callbacks.TensorBoard(log_dir=filepath,
batch_size=16)
cbks = [CustomCallback(), tsb]
model.fit(x_train, y_train, epochs=2, batch_size=16,
callbacks=cbks,
verbose=0)
assert os.path.isdir(filepath)
shutil.rmtree(filepath)
assert not tmpdir.listdir()
def test_CallbackValData():
np.random.seed(1337)
(X_train, y_train), (X_test, y_test) = get_data_callbacks()
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = Sequential()
model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
cbk = callbacks.LambdaCallback(on_train_end=lambda x: 1)
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=[cbk], epochs=1)
cbk2 = callbacks.LambdaCallback(on_train_end=lambda x: 1)
train_generator = data_generator(X_train, y_train, batch_size)
model.fit_generator(train_generator, len(X_train), epochs=1,
validation_data=(X_test, y_test),
callbacks=[cbk2])
# callback validation data should always have x, y, and sample weights
assert len(cbk.validation_data) == len(cbk2.validation_data) == 3
assert cbk.validation_data[0] is cbk2.validation_data[0]
assert cbk.validation_data[1] is cbk2.validation_data[1]
assert cbk.validation_data[2].shape == cbk2.validation_data[2].shape
def test_LambdaCallback():
np.random.seed(1337)
(X_train, y_train), (X_test, y_test) = get_data_callbacks()
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = Sequential()
model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
# Start an arbitrary process that should run during model training and
# be terminated after training has completed.
def f():
while True:
pass
p = multiprocessing.Process(target=f)
p.start()
cleanup_callback = callbacks.LambdaCallback(
on_train_end=lambda logs: p.terminate())
cbks = [cleanup_callback]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=5)
p.join()
assert not p.is_alive()
def test_TensorBoard_with_ReduceLROnPlateau(tmpdir):
import shutil
np.random.seed(np.random.randint(1, 1e7))
filepath = str(tmpdir / 'logs')
(X_train, y_train), (X_test, y_test) = get_data_callbacks()
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = Sequential()
model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='binary_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
cbks = [
callbacks.ReduceLROnPlateau(
monitor='val_loss',
factor=0.5,
patience=4,
verbose=1),
callbacks.TensorBoard(
log_dir=filepath)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=2)
assert os.path.isdir(filepath)
shutil.rmtree(filepath)
assert not tmpdir.listdir()
def tests_RemoteMonitor():
(X_train, y_train), (X_test, y_test) = get_data_callbacks()
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = Sequential()
model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
cbks = [callbacks.RemoteMonitor()]
with patch('requests.post'):
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
def tests_RemoteMonitorWithJsonPayload():
(X_train, y_train), (X_test, y_test) = get_data_callbacks()
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = Sequential()
model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
cbks = [callbacks.RemoteMonitor(send_as_json=True)]
with patch('requests.post'):
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
if __name__ == '__main__':
pytest.main([__file__])
|
run_fed_neural_double_dice.py
|
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import numpy as np
import os
import sys
import tensorflow.compat.v2 as tf
tf.compat.v1.enable_v2_behavior()
import pickle
from tf_agents.environments import gym_wrapper
from tf_agents.environments import tf_py_environment
from tf_agents.trajectories import time_step as ts
from tf_agents.policies import actor_policy
from dice_rl.environments.env_policies import get_target_policy
import dice_rl.environments.gridworld.navigation as navigation
import dice_rl.environments.gridworld.tree as tree
import dice_rl.environments.gridworld.taxi as taxi
from dice_rl.estimators.neural_dice import NeuralDice
from dice_rl.optimizers.neural_pgdice import NeuralPgDice
from dice_rl.estimators import estimator as estimator_lib
from dice_rl.networks.value_network import ValueNetwork
from dice_rl.networks.policy_network import PolicyNetwork
from dice_rl.networks.tabular_policy_network import TabularSoftmaxPolicyNetwork
import dice_rl.utils.common as common_utils
from dice_rl.data.dataset import Dataset, EnvStep, StepType
from dice_rl.data.tf_offpolicy_dataset import TFOffpolicyDataset
'''
TODO:clean up import once done debugging
'''
import dice_rl.utils.common as common_lib
from dice_rl.environments.infinite_cartpole import InfiniteCartPole
import threading
# BEGIN GOOGLE-INTERNAL
# import google3.learning.deepmind.xmanager2.client.google as xm
# END GOOGLE-INTERNAL
'''
Remove this log blocker once retracing problem is solved
'''
# import logging
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # FATAL
# logging.getLogger('tensorflow').setLevel(logging.FATAL)
load_dir ='./tests/testdata/grid5'
save_dir = './tests/testdata/'
env_name ='small_grid'
seed = [0,1,2,3,4,5,6,7]
tabular_obs = True
num_trajectory = 400
max_trajectory_length = 100
alpha = 0.0
gamma = 0.99
nu_learning_rate = 0.00003
zeta_learning_rate = 0.00003
policy_learning_rate = 0.001
nu_regularizer = 0.0
zeta_regularizer = 0.0
num_steps = 1000000
batch_size = 2048
f_exponent = 2
primal_form = False
primal_regularizer = 1.
dual_regularizer = 1.
zero_reward = False
norm_regularizer = 1.
zeta_pos = True
scale_reward = 1.
shift_reward = 0.
transform_reward = None
eval_trajectory = 100
eval_trajectory_length = 100
opt_interval = 1000
eval_interval = 1000
warmup_step = 30000
double_dual_regularizer = 1e-6
n_worker = 5
global nu, nu_double, zeta, zeta_double, theta
def reward_fn(env_step):
reward = env_step.reward * scale_reward + shift_reward
if transform_reward is None:
return reward
if transform_reward == 'exp':
reward = tf.math.exp(reward)
elif transform_reward == 'cuberoot':
reward = tf.sign(reward) * tf.math.pow(tf.abs(reward), 1.0 / 3.0)
else:
raise ValueError('Reward {} not implemented.'.format(transform_reward))
return reward
def get_distribution_table(load_dir, env_name, tabular_obs=True, alpha=0.):
''' Gives the distribution table of a policy alpha-close to optimal policy. '''
init_policy = get_target_policy(load_dir, env_name, tabular_obs, alpha=alpha)
n_state = init_policy.time_step_spec.observation.maximum - \
init_policy.time_step_spec.observation.minimum + 1
state_range = tf.range(n_state)
tfagent_timestep = ts.restart(state_range, state_range.shape[0])
init_dist = init_policy.distribution(tfagent_timestep).info['distribution']
return init_dist
datasets, nu_estimators, zeta_estimators, optimizers = [],[],[],[]
for i in range(n_worker):
hparam_str = ('{ENV_NAME}_tabular{TAB}_alpha{ALPHA}_seed{SEED}_'
'numtraj{NUM_TRAJ}_maxtraj{MAX_TRAJ}').format(
ENV_NAME=env_name,
TAB=tabular_obs,
ALPHA=alpha,
SEED=seed[i],
NUM_TRAJ=num_trajectory,
MAX_TRAJ=max_trajectory_length)
train_hparam_str = (
'nlr{NLR}_zlr{ZLR}_zeror{ZEROR}_preg{PREG}_dreg{DREG}_nreg{NREG}_'
'pform{PFORM}_fexp{FEXP}_zpos{ZPOS}_'
'scaler{SCALER}_shiftr{SHIFTR}_transr{TRANSR}').format(
NLR=nu_learning_rate,
ZLR=zeta_learning_rate,
ZEROR=zero_reward,
PREG=primal_regularizer,
DREG=dual_regularizer,
NREG=norm_regularizer,
PFORM=primal_form,
FEXP=f_exponent,
ZPOS=zeta_pos,
SCALER=scale_reward,
SHIFTR=shift_reward,
TRANSR=transform_reward)
  if save_dir is not None:
    # Build a per-worker log directory without overwriting save_dir, which
    # would otherwise keep nesting paths on every loop iteration.
    worker_save_dir = os.path.join(save_dir, hparam_str, train_hparam_str)
    summary_writer = tf.summary.create_file_writer(logdir=worker_save_dir)
    summary_writer.set_as_default()
else:
tf.summary.create_noop_writer()
directory = os.path.join(load_dir, hparam_str)
print('Loading dataset from', directory)
dataset = Dataset.load(directory)
datasets.append(dataset)
all_steps = dataset.get_all_steps()
max_reward = tf.reduce_max(all_steps.reward)
min_reward = tf.reduce_min(all_steps.reward)
print('num loaded steps', dataset.num_steps)
print('num loaded total steps', dataset.num_total_steps)
print('num loaded episodes', dataset.num_episodes)
print('num loaded total episodes', dataset.num_total_episodes)
print('min reward', min_reward, 'max reward', max_reward)
print('behavior per-step',
estimator_lib.get_fullbatch_average(dataset, gamma=gamma))
target_dataset = Dataset.load(directory)
# target_dataset = Dataset.load(
# directory.replace('alpha{}'.format(alpha), 'alpha1.0'))
print('target per-step',
estimator_lib.get_fullbatch_average(target_dataset, gamma=1.))
activation_fn = tf.nn.relu
kernel_initializer = tf.keras.initializers.GlorotUniform()
hidden_dims = (64, 64)
input_spec = (dataset.spec.observation, dataset.spec.action)
output_spec = dataset.spec.action
nu_network = ValueNetwork(
input_spec,
fc_layer_params=hidden_dims,
activation_fn=activation_fn,
kernel_initializer=kernel_initializer,
last_kernel_initializer=kernel_initializer)
double_nu_network = ValueNetwork(
input_spec,
fc_layer_params=hidden_dims,
activation_fn=activation_fn,
kernel_initializer=kernel_initializer,
last_kernel_initializer=kernel_initializer)
output_activation_fn = tf.math.square if zeta_pos else tf.identity
zeta_network = ValueNetwork(
input_spec,
fc_layer_params=hidden_dims,
activation_fn=activation_fn,
output_activation_fn=output_activation_fn,
kernel_initializer=kernel_initializer,
last_kernel_initializer=kernel_initializer)
double_zeta_network = ValueNetwork(
input_spec,
fc_layer_params=hidden_dims,
activation_fn=activation_fn,
output_activation_fn=output_activation_fn,
kernel_initializer=kernel_initializer,
last_kernel_initializer=kernel_initializer)
if common_lib.is_categorical_spec(dataset.spec.observation) and \
common_lib.is_categorical_spec(dataset.spec.action):
init_dist = get_distribution_table(load_dir, env_name, alpha=alpha)
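    # Smooth the loaded (near-optimal) action distribution: keep 4/5 of its
    # mass and spread the remaining 1/5 uniformly over actions, so the
    # initial tabular softmax policy stays exploratory.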
init_dist = (1-4/5) * tf.ones(init_dist.shape)/init_dist.shape[1] + \
4/5 * init_dist
policy_network = TabularSoftmaxPolicyNetwork(
dataset.spec.observation,
output_spec,
initial_distribution=init_dist)
else:
policy_network = PolicyNetwork(
dataset.spec.observation,
output_spec)
nu_optimizer = tf.keras.optimizers.Adam(nu_learning_rate, clipvalue=1.0)
double_nu_optimizer = tf.keras.optimizers.Adam(nu_learning_rate, clipvalue=1.0)
zeta_optimizer = tf.keras.optimizers.Adam(zeta_learning_rate, clipvalue=1.0)
double_zeta_optimizer = tf.keras.optimizers.Adam(zeta_learning_rate, clipvalue=1.0)
lam_optimizer = tf.keras.optimizers.Adam(nu_learning_rate, clipvalue=1.0)
policy_optimizer = tf.keras.optimizers.Adam(policy_learning_rate)
nu_estimator = NeuralDice(
dataset.spec,
nu_network,
double_zeta_network,
nu_optimizer,
double_zeta_optimizer,
lam_optimizer,
gamma,
zero_reward=zero_reward,
f_exponent=f_exponent,
primal_form=primal_form,
reward_fn=reward_fn,
primal_regularizer=primal_regularizer,
dual_regularizer=double_dual_regularizer,
norm_regularizer=0.,
nu_regularizer=nu_regularizer,
zeta_regularizer=zeta_regularizer)
zeta_estimator = NeuralDice(
dataset.spec,
double_nu_network,
zeta_network,
double_nu_optimizer,
zeta_optimizer,
lam_optimizer,
gamma,
zero_reward=zero_reward,
f_exponent=f_exponent,
primal_form=primal_form,
reward_fn=reward_fn,
primal_regularizer=0.,
dual_regularizer=dual_regularizer,
norm_regularizer=norm_regularizer,
nu_regularizer=nu_regularizer,
zeta_regularizer=zeta_regularizer)
optimizer = NeuralPgDice(
dataset.spec,
policy_network,
nu_network,
zeta_network,
policy_optimizer,
gamma)
nu_estimators.append(nu_estimator)
zeta_estimators.append(zeta_estimator)
optimizers.append(optimizer)
def main(argv):
global_step = tf.Variable(0, dtype=tf.int64)
tf.summary.experimental.set_step(global_step)
lock = threading.Lock()
  threads = [threading.Thread(target=train_local, args=(kk, lock))
             for kk in range(n_worker)]
  for t in threads:
    t.start()
  for t in threads:
    t.join()
def synchronization(kk):
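  # Federated-averaging step: sum the (nu, zeta, policy) parameters over all
  # workers, divide by n_worker, and write the averaged values back into
  # worker kk's estimators and optimizer. Called by each worker thread while
  # holding the shared lock.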
global nu, nu_double, zeta, zeta_double, theta
nu, zeta_double = nu_estimators[0].get_parameters()
nu_double, zeta = zeta_estimators[0].get_parameters()
theta = optimizers[0].get_policy_parameters()
  # Accumulate the other workers' parameters; use a separate loop variable so
  # the caller's worker index `kk` is not shadowed (otherwise the update at
  # the end of this function would always target the last worker).
  for jj in range(1, n_worker):
    nu_i, zeta_double_i = nu_estimators[jj].get_parameters()
    nu_double_i, zeta_i = zeta_estimators[jj].get_parameters()
    theta_i = optimizers[jj].get_policy_parameters()
for ll in range(len(nu)):
nu[ll] = nu[ll] + nu_i[ll]
nu_double[ll] = nu_double[ll] + nu_double_i[ll]
for ll in range(len(zeta)):
zeta[ll] = zeta[ll] + zeta_i[ll]
zeta_double[ll] = zeta_double[ll] + zeta_double_i[ll]
for ll in range(len(theta)):
theta[ll] = theta[ll] + theta_i[ll]
for ll in range(len(nu)):
nu[ll] = nu[ll] / n_worker
nu_double[ll] = nu_double[ll] / n_worker
for ll in range(len(zeta)):
zeta[ll] = zeta[ll] / n_worker
zeta_double[ll] = zeta_double[ll] / n_worker
for ll in range(len(theta)):
theta[ll] = theta[ll] / n_worker
nu_estimators[kk].update(nu, zeta_double)
zeta_estimators[kk].update(nu_double, zeta)
optimizers[kk].update(theta)
def train_local(kk, lock):
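  # Per-worker training loop: synchronize parameters under the lock, sample a
  # batch from this worker's dataset, take one train step for each of the two
  # NeuralDice estimators, and periodically run a full-batch policy update and
  # an on-policy evaluation.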
avg_rews = []
env, target_policy = get_env_tfpolicy(env_name, policy_network, tabular_obs)
  avg_rew = optimizers[kk].evaluate_policy(env, eval_trajectory*2, eval_trajectory_length, target_policy)
avg_rews.append(avg_rew)
print('Initial running avg reward:',np.mean(avg_rews))
for step in range(num_steps):
lock.acquire()
synchronization(kk)
lock.release()
transitions_batch = datasets[kk].get_step(batch_size, num_steps=2)
initial_steps_batch, _ = datasets[kk].get_episode(batch_size, truncate_episode_at=1)
initial_steps_batch = tf.nest.map_structure(lambda t: t[:, 0, ...],
initial_steps_batch)
losses = nu_estimators[kk].train_step(initial_steps_batch, transitions_batch,
target_policy)
losses = zeta_estimators[kk].train_step(initial_steps_batch, transitions_batch,
target_policy)
if step % eval_interval == 0 or step == num_steps - 1:
      estimate = optimizers[kk].estimate_average_reward(datasets[kk], target_policy)
if (step-warmup_step) % opt_interval == 0 and step >= warmup_step:
optimizers[kk].get_zeta_normalizer(datasets[kk])
      policy_loss = optimizers[kk].fullbatch_train_step(datasets[kk])
policy = optimizers[kk].get_policy_network()
_, target_policy = get_env_tfpolicy(env_name, policy,
tabular_obs)
if (step-warmup_step) % eval_interval == 0:
avg_rew = optimizers[kk].evaluate_policy(env, eval_trajectory, eval_trajectory_length,
target_policy)
avg_rews.append(avg_rew)
print('step', step, 'Running avg reward:',np.mean(avg_rews))
'''
TODO: Move these functions to other files once done debugging
'''
def get_env_tfpolicy(env_name,
policy_network,
tabular_obs=False,
env_seed=0):
''' Converts a policy network to a TFPolicy. '''
if env_name == 'grid':
env = navigation.GridWalk(tabular_obs=tabular_obs)
env.seed(env_seed)
tf_env = tf_py_environment.TFPyEnvironment(gym_wrapper.GymWrapper(env))
policy = actor_policy.ActorPolicy(
tf_env.time_step_spec(),
tf_env.action_spec(),
policy_network)
elif env_name == 'small_grid':
env = navigation.GridWalk(length=5,tabular_obs=tabular_obs)
env.seed(env_seed)
tf_env = tf_py_environment.TFPyEnvironment(gym_wrapper.GymWrapper(env))
policy = actor_policy.ActorPolicy(
tf_env.time_step_spec(),
tf_env.action_spec(),
policy_network)
elif env_name == 'cartpole':
env = InfiniteCartPole()
env.seed(env_seed)
tf_env = tf_py_environment.TFPyEnvironment(gym_wrapper.GymWrapper(env))
policy = actor_policy.ActorPolicy(
tf_env.time_step_spec(),
tf_env.action_spec(),
policy_network)
else:
raise ValueError('Unrecognized environment %s.' % env_name)
return env, policy
if __name__ == '__main__':
app.run(main)
|
test_uploader.py
|
import os
import time
import threading
import unittest
import logging
import json
from selfdrive.swaglog import cloudlog
import selfdrive.loggerd.uploader as uploader
from common.xattr import getxattr
from selfdrive.loggerd.tests.loggerd_tests_common import UploaderTestCase
class TestLogHandler(logging.Handler):
def __init__(self):
logging.Handler.__init__(self)
self.reset()
def reset(self):
self.upload_order = list()
self.upload_ignored = list()
def emit(self, record):
try:
j = json.loads(record.message)
if j["event"] == "upload_success":
self.upload_order.append(j["key"])
if j["event"] == "upload_ignored":
self.upload_ignored.append(j["key"])
except Exception:
pass
log_handler = TestLogHandler()
cloudlog.addHandler(log_handler)
class TestUploader(UploaderTestCase):
def setUp(self):
super(TestUploader, self).setUp()
log_handler.reset()
def tearDown(self):
super(TestUploader, self).tearDown()
def start_thread(self):
self.end_event = threading.Event()
self.up_thread = threading.Thread(target=uploader.uploader_fn, args=[self.end_event])
self.up_thread.daemon = True
self.up_thread.start()
def join_thread(self):
self.end_event.set()
self.up_thread.join()
def gen_files(self, lock=False):
f_paths = list()
for t in ["bootlog.bz2", "qlog.bz2", "rlog.bz2", "dcamera.hevc", "fcamera.hevc"]:
f_paths.append(self.make_file_with_data(self.seg_dir, t, 1, lock=lock))
return f_paths
def gen_order(self, seg1, seg2):
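    # Expected upload order: qlogs and rlogs for both segment formats first,
    # then the camera files per segment, and bootlogs last.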
keys = [f"{self.seg_format.format(i)}/qlog.bz2" for i in seg1]
keys += [f"{self.seg_format.format(i)}/rlog.bz2" for i in seg1]
keys += [f"{self.seg_format2.format(i)}/qlog.bz2" for i in seg2]
keys += [f"{self.seg_format2.format(i)}/rlog.bz2" for i in seg2]
for i in seg1:
keys += [f"{self.seg_format.format(i)}/{f}" for f in ['fcamera.hevc','dcamera.hevc']]
for i in seg2:
keys += [f"{self.seg_format2.format(i)}/{f}" for f in ['fcamera.hevc','dcamera.hevc']]
keys += [f"{self.seg_format.format(i)}/bootlog.bz2" for i in seg1]
keys += [f"{self.seg_format2.format(i)}/bootlog.bz2" for i in seg2]
return keys
def test_upload(self):
f_paths = self.gen_files(lock=False)
self.start_thread()
# allow enough time that files could upload twice if there is a bug in the logic
time.sleep(5)
self.join_thread()
self.assertTrue(len(log_handler.upload_ignored) == 0, "Some files were ignored")
self.assertFalse(len(log_handler.upload_order) < len(f_paths), "Some files failed to upload")
self.assertFalse(len(log_handler.upload_order) > len(f_paths), "Some files were uploaded twice")
for f_path in f_paths:
self.assertTrue(getxattr(f_path, uploader.UPLOAD_ATTR_NAME), "All files not uploaded")
exp_order = self.gen_order([self.seg_num], [])
self.assertTrue(log_handler.upload_order == exp_order, "Files uploaded in wrong order")
def test_upload_ignored(self):
self.set_ignore()
f_paths = self.gen_files(lock=False)
self.start_thread()
# allow enough time that files could upload twice if there is a bug in the logic
time.sleep(5)
self.join_thread()
self.assertTrue(len(log_handler.upload_order) == 0, "Some files were not ignored")
self.assertFalse(len(log_handler.upload_ignored) < len(f_paths), "Some files failed to ignore")
self.assertFalse(len(log_handler.upload_ignored) > len(f_paths), "Some files were ignored twice")
for f_path in f_paths:
self.assertTrue(getxattr(f_path, uploader.UPLOAD_ATTR_NAME), "All files not ignored")
exp_order = self.gen_order([self.seg_num], [])
self.assertTrue(log_handler.upload_ignored == exp_order, "Files ignored in wrong order")
def test_upload_files_in_create_order(self):
f_paths = list()
seg1_nums = [0,1,2,10,20]
for i in seg1_nums:
self.seg_dir = self.seg_format.format(i)
f_paths += self.gen_files()
seg2_nums = [5,50,51]
for i in seg2_nums:
self.seg_dir = self.seg_format2.format(i)
f_paths += self.gen_files()
self.start_thread()
# allow enough time that files could upload twice if there is a bug in the logic
time.sleep(5)
self.join_thread()
self.assertTrue(len(log_handler.upload_ignored) == 0, "Some files were ignored")
self.assertFalse(len(log_handler.upload_order) < len(f_paths), "Some files failed to upload")
self.assertFalse(len(log_handler.upload_order) > len(f_paths), "Some files were uploaded twice")
for f_path in f_paths:
self.assertFalse(os.path.exists(f_path), "All files not uploaded")
#exp_order = self.gen_order(seg1_nums, seg2_nums)
#self.assertTrue(log_handler.upload_order == exp_order, "Files uploaded in wrong order")
def test_no_upload_with_lock_file(self):
f_paths = self.gen_files(lock=True)
self.start_thread()
# allow enough time that files should have been uploaded if they would be uploaded
time.sleep(5)
self.join_thread()
for f_path in f_paths:
self.assertFalse(getxattr(f_path, uploader.UPLOAD_ATTR_NAME), "File upload when locked")
if __name__ == "__main__":
unittest.main()
|
search_by_db.py
|
from fake_useragent import UserAgent
import requests
from time import sleep
import datetime
from model import WeiboInfo, WeiboTask, engine
from sqlalchemy.orm import sessionmaker
from pyquery import PyQuery as pq
import random
Session = sessionmaker(bind=engine)
session = Session()
ua = UserAgent(verify_ssl=False)
cookies = ""
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'Accept-Language': 'zh-cn',
'Cookie': cookies,
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'}
import queue
import threading
class Weibo():
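    # Three-stage scraping pipeline driven by queues:
    #   urlqueue     -> search tasks loaded from the WeiboTask table
    #   sec_urlqueue -> per-page result URLs produced by req_index()
    #   canshu_queue -> parsed rows consumed by insert() and written to the DB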
def __init__(self):
self.urlqueue = queue.Queue()
self.sec_urlqueue = queue.Queue()
self.canshu_queue = queue.Queue()
Session = sessionmaker(bind=engine)
self.session = Session()
def get_data(self,url):
sleep(1.5)
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Cache-Control': 'max-age=0',
'Connection': 'keep-alive',
'Cookie': cookies,
'User-Agent': ua.random}
# , proxies = proxy
print(url)
data = requests.get(url, headers=headers).text
# print(data)
return data
def parse_html(self,pl_feedlist_index, begintime,pageurl):
canshu_list=[]
for i in pl_feedlist_index.find('.card-wrap').items():
canshu = {}
card_feed = (i.find('.card-feed'))
content = card_feed.find('.content')
name = content.find('.name').text()
name_link = content.find('.name').attr('href')
txt = content.find('.txt').text()
weibo_from = content.find('.from').text()
card_act = i.find(('.card-act'))
feed_list_forward = 0
feed_list_comment = 0
feed_list_like = 0
            # Use a separate loop variable so the outer card loop's `i` is not
            # shadowed. '转发' is the repost count, '评论' is the comment count;
            # any remaining item is treated as the like count.
            for li in card_act.find('li').items():
                # print(li.text())
                if '转发' in li.text():
                    feed_list_forward = (li.text()).replace('转发', '')
                    continue
                elif '评论' in li.text():
                    feed_list_comment = (li.text()).replace('评论', '')
                    continue
                feed_list_like = (li.text())
if feed_list_forward == '':
feed_list_forward = 0
if feed_list_comment == '':
feed_list_comment = 0
if feed_list_like == '':
feed_list_like = 0
print(name, name_link, weibo_from, feed_list_forward, feed_list_comment, feed_list_like)
canshu['page_url'] = pageurl
canshu['name'] = name
canshu['name_link'] = name_link
canshu['weibo_from'] = weibo_from
canshu['txt'] = txt
canshu['feed_list_forward'] = feed_list_forward
canshu['feed_list_comment'] = feed_list_comment
canshu['feed_list_like'] = feed_list_like
canshu['search_time'] = begintime
canshu_list.append(canshu)
self.canshu_queue.put(canshu_list)
def req_index(self):
while True:
if self.urlqueue.qsize()%5==0:
sleep(10)
task_item=self.urlqueue.get()
url=task_item.get('url')
flag=task_item.get('flag')
id=task_item.get('id')
time=task_item.get('time')
pageurl = str(url).replace("indexpage", str(1))
data = self.get_data(pageurl)
doc = pq(data)
pl_feedlist_index = doc.find('#pl_feedlist_index')
if pl_feedlist_index.find('.card-no-result'):
self.urlqueue.task_done()
weibo_task =session.query(WeiboTask).filter_by(id=id).first()
weibo_task.flag='5'
session.commit()
continue
page=1
for i in pl_feedlist_index.find('.m-page .list .s-scroll li').items():
page = i.text().replace('第', '').replace('页', '')
print(page)
if int(page) > 0:
weibo_task = session.query(WeiboTask).filter_by(id=id).first()
weibo_task.flag = '1'
session.commit()
for page_num in range(1, int(page) + 1):
sec_url_item={}
pageurl = str(url).replace("indexpage", str(page_num))
sec_url_item['id']=id
sec_url_item['url']=pageurl
sec_url_item['time']=time
self.sec_urlqueue.put(sec_url_item)
self.urlqueue.task_done()
def seconde_run(self):
while True:
sec_task_item =self.sec_urlqueue.get()
pageurl =sec_task_item.get('url')
time =sec_task_item.get('time')
id =sec_task_item.get('id')
data = self.get_data(pageurl)
doc = pq(data)
pl_feedlist_index = doc.find('#pl_feedlist_index')
if pl_feedlist_index.find('.card-no-result'):
self.sec_urlqueue.task_done()
continue
self.parse_html(pl_feedlist_index,time,pageurl)
self.sec_urlqueue.task_done()
def insert(self):
while True:
canshu_list =self.canshu_queue.get()
for canshu in canshu_list:
weibo_info = WeiboInfo()
weibo_info.page_url = canshu.get('page_url')
weibo_info.name = canshu.get('name')
weibo_info.name_link = canshu.get('name_link')
weibo_info.weibo_from = canshu.get('weibo_from')
weibo_info.txt = canshu.get('txt')
weibo_info.feed_list_forward = canshu.get('feed_list_forward')
weibo_info.feed_list_comment = canshu.get('feed_list_comment')
weibo_info.feed_list_like = canshu.get('feed_list_like')
weibo_info.search_time = canshu.get('search_time')
self.session.add(weibo_info)
self.session.flush()
self.session.commit()
self.canshu_queue.task_done()
def run(self):
weibotask = self.session.query(WeiboTask).filter(WeiboTask.flag == '0').order_by(WeiboTask.time.desc()).all()
for i in weibotask:
task_item={}
task_item['id']=i.id
task_item['url']=i.url
task_item['flag']=i.flag
task_item['time']=i.time
self.urlqueue.put(task_item)
thread_list =[]
for i in range(1):
Treq_page = threading.Thread(target=self.req_index)
thread_list.append(Treq_page)
for i in range(100):
secTreq_page = threading.Thread(target=self.seconde_run)
thread_list.append(secTreq_page)
for i in range(1):
sqlTreq_page = threading.Thread(target=self.insert)
thread_list.append(sqlTreq_page)
for t in thread_list:
t.setDaemon(True)
t.start()
for q in [self.urlqueue,self.sec_urlqueue,self.canshu_queue]:
q.join()
if __name__ == '__main__':
weib = Weibo()
weib.run()
|
main.py
|
try:
from dotenv import load_dotenv
load_dotenv()
except ImportError:
pass
from db import *
from twitch import *
from tt import *
from utils import *
import sys
import time
import schedule
import threading
def main():
    # Tracks whether the streamer data
    # has been modified
    modified = False
    # Get the OAuth access token and request header
    access_token, header = getOAuth()
    # DataFrame with the streamers' data
    streamers = readStreamers()
    # If it is not empty, fetch the IDs
    if not streamers.empty:
        # Check whether each streamer is already registered in the DB
        results = returnStreamerNames().fetchall()
        # Store the names of the streamers already registered
names = []
for r in results:
names.append(*r)
        # Keep only the streamers that are not yet in the DB
        streamers = deleteExistStreamers(streamers, names)
        # Return the dataframe with the id of each new streamer
        streamers = getStreamerId(streamers, header)
        # Insert each new streamer into the DB
insertStreamers(streamers)
if names:
            # DataFrame with the streamers' data
            streamers = readStreamers()
            # Fetch all the streamers' info from the DB
            results = returnStreamerInfo().fetchall()
            # Fill the dataframe with the Ids
for streamer in results:
name = streamer[0]
idt = streamer[1]
index = streamers[streamers["Nome"] == str(name)].index
streamers.loc[index, "Id"] = str(idt)
            # First of all, check whether any streamer
            # has changed the channel name
            # print(streamers)
streamers, modified = nameChanged(streamers, header)
if modified:
                # Save the changes to the .csv
                updateCSV(streamers)
                # Read it again
streamers = readStreamers()
results = returnStreamerInfo().fetchall()
            # Check whether each streamer is live or not
for streamer in results:
idt = streamer[1]
if isStreamerLive(str(idt), header):
title = getStreamTitle(idt, header)
                    # Remove commands from the title
title = removeCmdsFromTitle(title)
                    # Check whether the streamer was already live:
                    # if so, do not tweet again;
                    # if not, send the tweet
is_live = streamer[4]
if not is_live:
twitch = streamer[2]
twitter = streamer[3]
isPrint = streamer[5]
streamer_type = streamer[6]
hashtags = streamer[7]
                        # Send the tweet
insertOnStream(idt, True)
tweet(
twitch,
twitter,
title,
isPrint,
streamer_type,
hashtags,
)
else:
insertOnStream(idt, False)
else:
print("O DataFrame está vazio!")
def threaded_job(job):
    # Helper to run main() in a separate thread
    thread = threading.Thread(target=main)
    thread.start()
    # Wait for the thread to finish
thread.join()
if __name__ == "__main__":
schedule.every(15).seconds.do(threaded_job, main)
while True:
schedule.run_pending()
        # Sleep between scheduler checks to avoid busy-waiting
time.sleep(10)
|